spa_misc.c revision 297074
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

/*
 * SPA locking
 *
 * There are three basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
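
/*
 * Illustrative sketch (not part of the original source): a read-side
 * consumer of the config locks following the rules above.  SCL_VDEV as
 * reader is the cheapest lock and safe in any context; the function name
 * below is hypothetical and the block is compiled out.
 */
#if 0	/* example only */
static uint64_t
example_count_top_vdevs(spa_t *spa)
{
	uint64_t children;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	children = spa->spa_root_vdev->vdev_children;
	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (children);
}
#endif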

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
 */
boolean_t zfs_recover = B_FALSE;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

static int
sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS)
{
	int err, val;

	val = zfs_flags;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	/*
	 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all
	 * arc buffers in the system have the necessary additional
	 * checksum data.  However, it is safe to disable at any
	 * time.
	 */
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		val &= ~ZFS_DEBUG_MODIFY;
	zfs_flags = val;

	return (0);
}
TUNABLE_INT("vfs.zfs.debug_flags", &zfs_flags);
SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int),
    sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing.");

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata cannot be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that cannot be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init().
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  Altogether,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;
TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
    &spa_asize_inflation, 0,
    "Worst case inflation factor for single sector writes");
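
/*
 * Worked example (illustrative, restating the formula above): a single
 * 512-byte write may in the worst case allocate (3 parity + 1 data)
 * sectors on a raidz3 vdev, for each of 3 DVAs, doubled again by
 * ddt_sync() dittoing: 4 * 3 * 2 = 24.
 */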

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not on i386 or amd64, or if we are running in a
	 * virtual machine, disable the ZFS deadman thread by default.
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
 * the pool to be consumed.  This ensures that we don't run the pool
 * completely out of space, due to unaccounted changes (e.g. to the MOS).
 * It also limits the worst-case time to allocate space.  If we have
 * less than this amount of free space, most ZPL operations (e.g. write,
 * create) will return ENOSPC.
 *
 * Certain operations (e.g. file removal, most administrative actions) can
 * use half the slop space.  They will only return ENOSPC if less than half
 * the slop space is free.  Typically, once the pool has less than the slop
 * space free, the user will use these operations to free up space in the
 * pool.  These are the operations that call dsl_pool_adjustedsize() with
 * the netfree argument set to TRUE.
 *
 * A very restricted set of operations are always permitted, regardless of
 * the amount of free space.  These are the operations that call
 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
 * operations result in a net increase in the amount of space used,
 * it is possible to run the pool completely out of space, causing it to
 * be permanently read-only.
 *
 * See also the comments in zfs_space_check_t.
 */
int spa_slop_shift = 5;
SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
    &spa_slop_shift, 0,
    "Shift value of reserved space (1/(2^spa_slop_shift)).");
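
/*
 * Worked example (illustrative): at the default spa_slop_shift of 5, a pool
 * with 1 TiB of dspace reserves 1 TiB / 2^5 = 32 GiB of slop.
 * spa_get_slop_space() below also enforces a 32MB floor
 * (SPA_MINDEVSIZE >> 1) for very small pools.
 */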

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				/* Drop only the locks we already acquired. */
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
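
/*
 * Illustrative sketch (not part of the original source): non-blocking
 * acquisition with spa_config_tryenter().  Unlike spa_config_enter(), it
 * returns 0 instead of sleeping when a writer is active or wanted.  The
 * caller below is hypothetical and the block is compiled out.
 */
#if 0	/* example only */
static boolean_t
example_peek_config(spa_t *spa)
{
	if (!spa_config_tryenter(spa, SCL_CONFIG, FTAG, RW_READER))
		return (B_FALSE);	/* would have blocked; back off */
	/* ... read-only inspection of the configuration ... */
	spa_config_exit(spa, SCL_CONFIG, FTAG);
	return (B_TRUE);
}
#endif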

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@#");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
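
/*
 * Example (illustrative): because the search key is truncated at the first
 * '/', '@', or '#', spa_lookup("tank/home@snap") finds the spa_t for the
 * pool "tank"; the dataset name shown is hypothetical.
 */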

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	/*
	 * Disable the deadman timer if the pool is suspended.
	 */
	if (spa_suspended(spa)) {
#ifdef illumos
		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else
		/* Nothing; just don't schedule any future callouts. */
#endif
		return;
	}

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired.  Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile.
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	spa->spa_min_ashift = INT_MAX;
	spa->spa_max_ashift = 0;

	/*
	 * As a pool is being created, treat all features as disabled by
	 * setting SPA_FEATURE_DISABLED for all entries in the feature
	 * refcount cache.
	 */
	for (int i = 0; i < SPA_FEATURES; i++) {
		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
	}

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	zio_checksum_templates_free(spa);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_evicting_os_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_evicting_os_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_cksum_tmpls_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t held by a dsl dir that is
 * being asynchronously released.  Async releases occur from a taskq
 * performing eviction of dsl datasets and dirs.  The namespace lock
 * isn't held and the hold by the object being evicted may contribute to
 * spa_minref (e.g. dataset or directory released during pool export),
 * so the asserts in spa_close() do not apply.
 */
void
spa_async_close(spa_t *spa, void *tag)
{
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is
 * the number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
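
/*
 * Illustrative sketch (not part of the original source): the usual pairing
 * of spa_open_ref() and spa_close() around temporary use of a pool.  The
 * pool name is hypothetical, error handling is elided, and the block is
 * compiled out.
 */
#if 0	/* example only */
static void
example_with_pool_held(void)
{
	spa_t *spa;

	mutex_enter(&spa_namespace_lock);
	if ((spa = spa_lookup("tank")) != NULL)
		spa_open_ref(spa, FTAG);	/* refcount may be zero here */
	mutex_exit(&spa_namespace_lock);

	if (spa != NULL) {
		/* ... use the spa_t ... */
		spa_close(spa, FTAG);	/* no locking required to release */
	}
}
#endif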

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 *	- A spare may be part of multiple pools.
 *	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 *	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we
 * set the 'vdev_isspare' member to indicate that the device is a spare
 * (active or inactive).  When a spare is made active (used to replace a device
 * in the pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */
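
/*
 * Example (illustrative): if the same device is configured as a spare in
 * two pools, its entry in spa_spare_avl carries aux_count == 2, and
 * spa_spare_exists() keeps reporting the guid until both pools remove it.
 */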

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}
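
/*
 * Illustrative sketch (not part of the original source): the canonical
 * shape of a vdev configuration change using the enter/exit protocol
 * above.  The work in the middle is hypothetical and the block is
 * compiled out.
 */
#if 0	/* example only */
static int
example_vdev_change(spa_t *spa)
{
	uint64_t txg = spa_vdev_enter(spa);	/* namespace + SCL_ALL writer */
	int error = 0;

	/* ... modify the vdev tree; changes take effect in 'txg' ... */

	/* Releases the locks, waits for 'txg' to sync, updates the cache. */
	return (spa_vdev_exit(spa, NULL, txg, error));
}
#endif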

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions
 * have synced to disk, and then update the global configuration cache with
 * the new information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
	if (!nvlist_exists(spa->spa_label_features, feature)) {
		fnvlist_add_boolean(spa->spa_label_features, feature);
		/*
		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
		 * dirty the vdev config because lock SCL_CONFIG is not held.
		 * Thankfully, in this case we don't need to dirty the config
		 * because it will be written out anyway when we finish
		 * creating the pool.
		 */
		if (tx->tx_txg != TXG_INITIAL)
			vdev_config_dirty(spa->spa_root_vdev);
	}
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
		vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}
1503 "metadata" : "data", 1504 dmu_ot_byteswap[bswap].ob_name); 1505 } else { 1506 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1507 sizeof (type)); 1508 } 1509 if (!BP_IS_EMBEDDED(bp)) { 1510 checksum = 1511 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1512 } 1513 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1514 } 1515 1516 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1517 compress); 1518} 1519 1520void 1521spa_freeze(spa_t *spa) 1522{ 1523 uint64_t freeze_txg = 0; 1524 1525 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1526 if (spa->spa_freeze_txg == UINT64_MAX) { 1527 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1528 spa->spa_freeze_txg = freeze_txg; 1529 } 1530 spa_config_exit(spa, SCL_ALL, FTAG); 1531 if (freeze_txg != 0) 1532 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1533} 1534 1535void 1536zfs_panic_recover(const char *fmt, ...) 1537{ 1538 va_list adx; 1539 1540 va_start(adx, fmt); 1541 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1542 va_end(adx); 1543} 1544 1545/* 1546 * This is a stripped-down version of strtoull, suitable only for converting 1547 * lowercase hexadecimal numbers that don't overflow. 1548 */ 1549uint64_t 1550zfs_strtonum(const char *str, char **nptr) 1551{ 1552 uint64_t val = 0; 1553 char c; 1554 int digit; 1555 1556 while ((c = *str) != '\0') { 1557 if (c >= '0' && c <= '9') 1558 digit = c - '0'; 1559 else if (c >= 'a' && c <= 'f') 1560 digit = 10 + c - 'a'; 1561 else 1562 break; 1563 1564 val *= 16; 1565 val += digit; 1566 1567 str++; 1568 } 1569 1570 if (nptr) 1571 *nptr = (char *)str; 1572 1573 return (val); 1574} 1575 1576/* 1577 * ========================================================================== 1578 * Accessor functions 1579 * ========================================================================== 1580 */ 1581 1582boolean_t 1583spa_shutting_down(spa_t *spa) 1584{ 1585 return (spa->spa_async_suspended); 1586} 1587 1588dsl_pool_t * 1589spa_get_dsl(spa_t *spa) 1590{ 1591 return (spa->spa_dsl_pool); 1592} 1593 1594boolean_t 1595spa_is_initializing(spa_t *spa) 1596{ 1597 return (spa->spa_is_initializing); 1598} 1599 1600blkptr_t * 1601spa_get_rootblkptr(spa_t *spa) 1602{ 1603 return (&spa->spa_ubsync.ub_rootbp); 1604} 1605 1606void 1607spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1608{ 1609 spa->spa_uberblock.ub_rootbp = *bp; 1610} 1611 1612void 1613spa_altroot(spa_t *spa, char *buf, size_t buflen) 1614{ 1615 if (spa->spa_root == NULL) 1616 buf[0] = '\0'; 1617 else 1618 (void) strncpy(buf, spa->spa_root, buflen); 1619} 1620 1621int 1622spa_sync_pass(spa_t *spa) 1623{ 1624 return (spa->spa_sync_pass); 1625} 1626 1627char * 1628spa_name(spa_t *spa) 1629{ 1630 return (spa->spa_name); 1631} 1632 1633uint64_t 1634spa_guid(spa_t *spa) 1635{ 1636 dsl_pool_t *dp = spa_get_dsl(spa); 1637 uint64_t guid; 1638 1639 /* 1640 * If we fail to parse the config during spa_load(), we can go through 1641 * the error path (which posts an ereport) and end up here with no root 1642 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1643 * this case. 1644 */ 1645 if (spa->spa_root_vdev == NULL) 1646 return (spa->spa_config_guid); 1647 1648 guid = spa->spa_last_synced_guid != 0 ? 1649 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1650 1651 /* 1652 * Return the most recently synced out guid unless we're 1653 * in syncing context. 

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to
	 * handle this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

/*
 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
 * or at least 32MB.
 *
 * See the comment above spa_slop_shift for details.
 */
uint64_t
spa_get_slop_space(spa_t *spa)
{
	uint64_t space = spa_get_dspace(spa);

	return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}
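
/*
 * Note (illustrative; assumes vdev_deflate_ratio's initialization in
 * vdev.c): the ratio is scaled so that a plain disk vdev gets a value of
 * 512, making dsize == asize there, while RAID-Z vdevs get smaller ratios
 * so that parity overhead is deflated out of the accounted size.
 */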

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif	/* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}