spa_misc.c revision 339106
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright 2013 Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>

#if defined(__FreeBSD__) && defined(_KERNEL)
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *	- Lookup a spa_t by name
 *	- Add or remove a spa_t from the namespace
 *	- Increase spa_refcount from non-zero
 *	- Check if spa_refcount is zero
 *	- Rename a spa_t
 *	- add/remove/attach/detach devices
 *	- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.
 *	The spa_t cannot be destroyed or freed while this is non-zero.
 *	Internally, the refcount is never really 'zero' - opening a pool
 *	implicitly keeps some references in the DMU.  Internally we check
 *	against spa_minref, but present the image of a zero/non-zero value
 *	to consumers.
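 *
 *	For illustration only (a sketch, not lifted verbatim from any one
 *	caller in this file), a consumer that needs a short-lived hold on
 *	a pool typically follows the rules above like this:
 *
 *		mutex_enter(&spa_namespace_lock);
 *		spa = spa_lookup(name);
 *		if (spa != NULL)
 *			spa_open_ref(spa, FTAG);   (refcount may be zero here)
 *		mutex_exit(&spa_namespace_lock);
 *		... use the spa_t ...
 *		spa_close(spa, FTAG);              (no lock needed to drop a ref)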
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *	- RW_READER to perform I/O to the spa
 *	- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/*
 * Everything except dprintf, spa, and indirect_remap is on by default
 * in debug builds.
 */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 * This should only be used as a last resort, as it typically results
 * in leaked space, or worse.
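 *
 * On FreeBSD this flag can be set as a loader tunable or toggled at
 * runtime via the sysctl declared below, e.g. (illustrative):
 *
 *	sysctl vfs.zfs.recover=1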
 */
boolean_t zfs_recover = B_FALSE;

/*
 * If destroy encounters an EIO while reading metadata (e.g. indirect
 * blocks), space referenced by the missing metadata can not be freed.
 * Normally this causes the background destroy to become "stalled", as
 * it is unable to make forward progress.  While in this stalled state,
 * all remaining space to free from the error-encountering filesystem is
 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
 * permanently leak the space from indirect blocks that can not be read,
 * and continue to free everything else that it can.
 *
 * The default, "stalling" behavior is useful if the storage partially
 * fails (i.e. some but not all i/os fail), and then later recovers.  In
 * this case, we will be able to continue pool operations while it is
 * partially failed, and when it recovers, we can continue to free the
 * space, with no leaks.  However, note that this case is actually
 * fairly rare.
 *
 * Typically pools either (a) fail completely (but perhaps temporarily,
 * e.g. a top-level vdev going offline), or (b) have localized,
 * permanent errors (e.g. disk returns the wrong data due to bit flip or
 * firmware bug).  In case (a), this setting does not matter because the
 * pool will be suspended and the sync thread will not be able to make
 * forward progress regardless.  In case (b), because the error is
 * permanent, the best we can do is leak the minimum amount of space,
 * which is what setting this flag will do.  Therefore, it is reasonable
 * for this flag to normally be set, but we chose the more conservative
 * approach of not setting it, so that there is no possibility of
 * leaking space in the "partial temporary" failure case.
 */
boolean_t zfs_free_leak_on_eio = B_FALSE;

/*
 * Expiration time in milliseconds.  This value has two meanings.  First it is
 * used to determine when the spa_deadman() logic should fire.  By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung".  Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;

/*
 * Check time in milliseconds.  This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init()
 */
int zfs_deadman_enabled = -1;

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().
All together, 328 * the worst case is: 329 * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 330 */ 331int spa_asize_inflation = 24; 332 333#if defined(__FreeBSD__) && defined(_KERNEL) 334SYSCTL_DECL(_vfs_zfs); 335SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0, 336 "Try to recover from otherwise-fatal errors."); 337 338static int 339sysctl_vfs_zfs_debug_flags(SYSCTL_HANDLER_ARGS) 340{ 341 int err, val; 342 343 val = zfs_flags; 344 err = sysctl_handle_int(oidp, &val, 0, req); 345 if (err != 0 || req->newptr == NULL) 346 return (err); 347 348 /* 349 * ZFS_DEBUG_MODIFY must be enabled prior to boot so all 350 * arc buffers in the system have the necessary additional 351 * checksum data. However, it is safe to disable at any 352 * time. 353 */ 354 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 355 val &= ~ZFS_DEBUG_MODIFY; 356 zfs_flags = val; 357 358 return (0); 359} 360 361SYSCTL_PROC(_vfs_zfs, OID_AUTO, debugflags, 362 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN, 0, sizeof(int), 363 sysctl_vfs_zfs_debug_flags, "IU", "Debug flags for ZFS testing."); 364SYSCTL_PROC(_vfs_zfs, OID_AUTO, debug_flags, 365 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(int), 366 sysctl_vfs_zfs_debug_flags, "IU", 367 "Debug flags for ZFS testing (deprecated, see vfs.zfs.debugflags)."); 368 369SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN, 370 &zfs_deadman_synctime_ms, 0, 371 "Stalled ZFS I/O expiration time in milliseconds"); 372SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN, 373 &zfs_deadman_checktime_ms, 0, 374 "Period of checks for stalled ZFS I/O in milliseconds"); 375SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN, 376 &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O"); 377SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN, 378 &spa_asize_inflation, 0, "Worst case inflation factor for single sector writes"); 379#endif 380 381#ifndef illumos 382#ifdef _KERNEL 383static void 384zfs_deadman_init() 385{ 386 /* 387 * If we are not i386 or amd64 or in a virtual machine, 388 * disable ZFS deadman thread by default 389 */ 390 if (zfs_deadman_enabled == -1) { 391#if defined(__amd64__) || defined(__i386__) 392 zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0; 393#else 394 zfs_deadman_enabled = 0; 395#endif 396 } 397} 398#endif /* _KERNEL */ 399#endif /* !illumos */ 400 401/* 402 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in 403 * the pool to be consumed. This ensures that we don't run the pool 404 * completely out of space, due to unaccounted changes (e.g. to the MOS). 405 * It also limits the worst-case time to allocate space. If we have 406 * less than this amount of free space, most ZPL operations (e.g. write, 407 * create) will return ENOSPC. 408 * 409 * Certain operations (e.g. file removal, most administrative actions) can 410 * use half the slop space. They will only return ENOSPC if less than half 411 * the slop space is free. Typically, once the pool has less than the slop 412 * space free, the user will use these operations to free up space in the pool. 413 * These are the operations that call dsl_pool_adjustedsize() with the netfree 414 * argument set to TRUE. 415 * 416 * Operations that are almost guaranteed to free up space in the absence of 417 * a pool checkpoint can use up to three quarters of the slop space 418 * (e.g zfs destroy). 419 * 420 * A very restricted set of operations are always permitted, regardless of 421 * the amount of free space. 
These are the operations that call 422 * dsl_sync_task(ZFS_SPACE_CHECK_NONE). If these operations result in a net 423 * increase in the amount of space used, it is possible to run the pool 424 * completely out of space, causing it to be permanently read-only. 425 * 426 * Note that on very small pools, the slop space will be larger than 427 * 3.2%, in an effort to have it be at least spa_min_slop (128MB), 428 * but we never allow it to be more than half the pool size. 429 * 430 * See also the comments in zfs_space_check_t. 431 */ 432int spa_slop_shift = 5; 433SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN, 434 &spa_slop_shift, 0, 435 "Shift value of reserved space (1/(2^spa_slop_shift))."); 436uint64_t spa_min_slop = 128 * 1024 * 1024; 437SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, spa_min_slop, CTLFLAG_RWTUN, 438 &spa_min_slop, 0, 439 "Minimal value of reserved space"); 440 441int spa_allocators = 4; 442 443/*PRINTFLIKE2*/ 444void 445spa_load_failed(spa_t *spa, const char *fmt, ...) 446{ 447 va_list adx; 448 char buf[256]; 449 450 va_start(adx, fmt); 451 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 452 va_end(adx); 453 454 zfs_dbgmsg("spa_load(%s, config %s): FAILED: %s", spa->spa_name, 455 spa->spa_trust_config ? "trusted" : "untrusted", buf); 456} 457 458/*PRINTFLIKE2*/ 459void 460spa_load_note(spa_t *spa, const char *fmt, ...) 461{ 462 va_list adx; 463 char buf[256]; 464 465 va_start(adx, fmt); 466 (void) vsnprintf(buf, sizeof (buf), fmt, adx); 467 va_end(adx); 468 469 zfs_dbgmsg("spa_load(%s, config %s): %s", spa->spa_name, 470 spa->spa_trust_config ? "trusted" : "untrusted", buf); 471} 472 473/* 474 * ========================================================================== 475 * SPA config locking 476 * ========================================================================== 477 */ 478static void 479spa_config_lock_init(spa_t *spa) 480{ 481 for (int i = 0; i < SCL_LOCKS; i++) { 482 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 483 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); 484 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); 485 refcount_create_untracked(&scl->scl_count); 486 scl->scl_writer = NULL; 487 scl->scl_write_wanted = 0; 488 } 489} 490 491static void 492spa_config_lock_destroy(spa_t *spa) 493{ 494 for (int i = 0; i < SCL_LOCKS; i++) { 495 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 496 mutex_destroy(&scl->scl_lock); 497 cv_destroy(&scl->scl_cv); 498 refcount_destroy(&scl->scl_count); 499 ASSERT(scl->scl_writer == NULL); 500 ASSERT(scl->scl_write_wanted == 0); 501 } 502} 503 504int 505spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) 506{ 507 for (int i = 0; i < SCL_LOCKS; i++) { 508 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 509 if (!(locks & (1 << i))) 510 continue; 511 mutex_enter(&scl->scl_lock); 512 if (rw == RW_READER) { 513 if (scl->scl_writer || scl->scl_write_wanted) { 514 mutex_exit(&scl->scl_lock); 515 spa_config_exit(spa, locks & ((1 << i) - 1), 516 tag); 517 return (0); 518 } 519 } else { 520 ASSERT(scl->scl_writer != curthread); 521 if (!refcount_is_zero(&scl->scl_count)) { 522 mutex_exit(&scl->scl_lock); 523 spa_config_exit(spa, locks & ((1 << i) - 1), 524 tag); 525 return (0); 526 } 527 scl->scl_writer = curthread; 528 } 529 (void) refcount_add(&scl->scl_count, tag); 530 mutex_exit(&scl->scl_lock); 531 } 532 return (1); 533} 534 535void 536spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) 537{ 538 int wlocks_held = 0; 539 540 ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); 541 542 for (int i = 
0; i < SCL_LOCKS; i++) { 543 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 544 if (scl->scl_writer == curthread) 545 wlocks_held |= (1 << i); 546 if (!(locks & (1 << i))) 547 continue; 548 mutex_enter(&scl->scl_lock); 549 if (rw == RW_READER) { 550 while (scl->scl_writer || scl->scl_write_wanted) { 551 cv_wait(&scl->scl_cv, &scl->scl_lock); 552 } 553 } else { 554 ASSERT(scl->scl_writer != curthread); 555 while (!refcount_is_zero(&scl->scl_count)) { 556 scl->scl_write_wanted++; 557 cv_wait(&scl->scl_cv, &scl->scl_lock); 558 scl->scl_write_wanted--; 559 } 560 scl->scl_writer = curthread; 561 } 562 (void) refcount_add(&scl->scl_count, tag); 563 mutex_exit(&scl->scl_lock); 564 } 565 ASSERT3U(wlocks_held, <=, locks); 566} 567 568void 569spa_config_exit(spa_t *spa, int locks, void *tag) 570{ 571 for (int i = SCL_LOCKS - 1; i >= 0; i--) { 572 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 573 if (!(locks & (1 << i))) 574 continue; 575 mutex_enter(&scl->scl_lock); 576 ASSERT(!refcount_is_zero(&scl->scl_count)); 577 if (refcount_remove(&scl->scl_count, tag) == 0) { 578 ASSERT(scl->scl_writer == NULL || 579 scl->scl_writer == curthread); 580 scl->scl_writer = NULL; /* OK in either case */ 581 cv_broadcast(&scl->scl_cv); 582 } 583 mutex_exit(&scl->scl_lock); 584 } 585} 586 587int 588spa_config_held(spa_t *spa, int locks, krw_t rw) 589{ 590 int locks_held = 0; 591 592 for (int i = 0; i < SCL_LOCKS; i++) { 593 spa_config_lock_t *scl = &spa->spa_config_lock[i]; 594 if (!(locks & (1 << i))) 595 continue; 596 if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) || 597 (rw == RW_WRITER && scl->scl_writer == curthread)) 598 locks_held |= 1 << i; 599 } 600 601 return (locks_held); 602} 603 604/* 605 * ========================================================================== 606 * SPA namespace functions 607 * ========================================================================== 608 */ 609 610/* 611 * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. 612 * Returns NULL if no matching spa_t is found. 613 */ 614spa_t * 615spa_lookup(const char *name) 616{ 617 static spa_t search; /* spa_t is large; don't allocate on stack */ 618 spa_t *spa; 619 avl_index_t where; 620 char *cp; 621 622 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 623 624 (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); 625 626 /* 627 * If it's a full dataset name, figure out the pool name and 628 * just use that. 629 */ 630 cp = strpbrk(search.spa_name, "/@#"); 631 if (cp != NULL) 632 *cp = '\0'; 633 634 spa = avl_find(&spa_namespace_avl, &search, &where); 635 636 return (spa); 637} 638 639/* 640 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. 641 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues 642 * looking for potentially hung I/Os. 643 */ 644static void 645spa_deadman(void *arg, int pending) 646{ 647 spa_t *spa = arg; 648 649 /* 650 * Disable the deadman timer if the pool is suspended. 651 */ 652 if (spa_suspended(spa)) { 653#ifdef illumos 654 VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 655#else 656 /* Nothing. just don't schedule any future callouts. 
*/ 657#endif 658 return; 659 } 660 661 zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", 662 (gethrtime() - spa->spa_sync_starttime) / NANOSEC, 663 ++spa->spa_deadman_calls); 664 if (zfs_deadman_enabled) 665 vdev_deadman(spa->spa_root_vdev); 666#ifdef __FreeBSD__ 667#ifdef _KERNEL 668 callout_schedule(&spa->spa_deadman_cycid, 669 hz * zfs_deadman_checktime_ms / MILLISEC); 670#endif 671#endif 672} 673 674#if defined(__FreeBSD__) && defined(_KERNEL) 675static void 676spa_deadman_timeout(void *arg) 677{ 678 spa_t *spa = arg; 679 680 taskqueue_enqueue(taskqueue_thread, &spa->spa_deadman_task); 681} 682#endif 683 684/* 685 * Create an uninitialized spa_t with the given name. Requires 686 * spa_namespace_lock. The caller must ensure that the spa_t doesn't already 687 * exist by calling spa_lookup() first. 688 */ 689spa_t * 690spa_add(const char *name, nvlist_t *config, const char *altroot) 691{ 692 spa_t *spa; 693 spa_config_dirent_t *dp; 694#ifdef illumos 695 cyc_handler_t hdlr; 696 cyc_time_t when; 697#endif 698 699 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 700 701 spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); 702 703 mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); 704 mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); 705 mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); 706 mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); 707 mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); 708 mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); 709 mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); 710 mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); 711 mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); 712 mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); 713 mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); 714 715 cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); 716 cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); 717 cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); 718 cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); 719 cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); 720 721 for (int t = 0; t < TXG_SIZE; t++) 722 bplist_create(&spa->spa_free_bplist[t]); 723 724 (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); 725 spa->spa_state = POOL_STATE_UNINITIALIZED; 726 spa->spa_freeze_txg = UINT64_MAX; 727 spa->spa_final_txg = UINT64_MAX; 728 spa->spa_load_max_txg = UINT64_MAX; 729 spa->spa_proc = &p0; 730 spa->spa_proc_state = SPA_PROC_NONE; 731 spa->spa_trust_config = B_TRUE; 732 733#ifdef illumos 734 hdlr.cyh_func = spa_deadman; 735 hdlr.cyh_arg = spa; 736 hdlr.cyh_level = CY_LOW_LEVEL; 737#endif 738 739 spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); 740 741#ifdef illumos 742 /* 743 * This determines how often we need to check for hung I/Os after 744 * the cyclic has already fired. Since checking for hung I/Os is 745 * an expensive operation we don't want to check too frequently. 746 * Instead wait for 5 seconds before checking again. 747 */ 748 when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms); 749 when.cyt_when = CY_INFINITY; 750 mutex_enter(&cpu_lock); 751 spa->spa_deadman_cycid = cyclic_add(&hdlr, &when); 752 mutex_exit(&cpu_lock); 753#else /* !illumos */ 754#ifdef _KERNEL 755 /* 756 * callout(9) does not provide a way to initialize a callout with 757 * a function and an argument, so we use callout_reset() to schedule 758 * the callout in the very distant future. 
Even if that event ever 759 * fires, it should be okayas we won't have any active zio-s. 760 * But normally spa_sync() will reschedule the callout with a proper 761 * timeout. 762 * callout(9) does not allow the callback function to sleep but 763 * vdev_deadman() needs to acquire vq_lock and illumos mutexes are 764 * emulated using sx(9). For this reason spa_deadman_timeout() 765 * will schedule spa_deadman() as task on a taskqueue that allows 766 * sleeping. 767 */ 768 TASK_INIT(&spa->spa_deadman_task, 0, spa_deadman, spa); 769 callout_init(&spa->spa_deadman_cycid, 1); 770 callout_reset_sbt(&spa->spa_deadman_cycid, SBT_MAX, 0, 771 spa_deadman_timeout, spa, 0); 772#endif 773#endif 774 refcount_create(&spa->spa_refcount); 775 spa_config_lock_init(spa); 776 777 avl_add(&spa_namespace_avl, spa); 778 779 /* 780 * Set the alternate root, if there is one. 781 */ 782 if (altroot) { 783 spa->spa_root = spa_strdup(altroot); 784 spa_active_count++; 785 } 786 787 spa->spa_alloc_count = spa_allocators; 788 spa->spa_alloc_locks = kmem_zalloc(spa->spa_alloc_count * 789 sizeof (kmutex_t), KM_SLEEP); 790 spa->spa_alloc_trees = kmem_zalloc(spa->spa_alloc_count * 791 sizeof (avl_tree_t), KM_SLEEP); 792 for (int i = 0; i < spa->spa_alloc_count; i++) { 793 mutex_init(&spa->spa_alloc_locks[i], NULL, MUTEX_DEFAULT, NULL); 794 avl_create(&spa->spa_alloc_trees[i], zio_bookmark_compare, 795 sizeof (zio_t), offsetof(zio_t, io_alloc_node)); 796 } 797 798 /* 799 * Every pool starts with the default cachefile 800 */ 801 list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), 802 offsetof(spa_config_dirent_t, scd_link)); 803 804 dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); 805 dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); 806 list_insert_head(&spa->spa_config_list, dp); 807 808 VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, 809 KM_SLEEP) == 0); 810 811 if (config != NULL) { 812 nvlist_t *features; 813 814 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, 815 &features) == 0) { 816 VERIFY(nvlist_dup(features, &spa->spa_label_features, 817 0) == 0); 818 } 819 820 VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); 821 } 822 823 if (spa->spa_label_features == NULL) { 824 VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, 825 KM_SLEEP) == 0); 826 } 827 828 spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0); 829 830 spa->spa_min_ashift = INT_MAX; 831 spa->spa_max_ashift = 0; 832 833 /* 834 * As a pool is being created, treat all features as disabled by 835 * setting SPA_FEATURE_DISABLED for all entries in the feature 836 * refcount cache. 837 */ 838 for (int i = 0; i < SPA_FEATURES; i++) { 839 spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; 840 } 841 842 return (spa); 843} 844 845/* 846 * Removes a spa_t from the namespace, freeing up any memory used. Requires 847 * spa_namespace_lock. This is called only after the spa_t has been closed and 848 * deactivated. 
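 *
 * For illustration only (a sketch of the documented preconditions, not
 * code taken verbatim from spa_evict_all() or any other caller), teardown
 * roughly follows:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_refcount_zero(spa) &&
 *	    spa->spa_state == POOL_STATE_UNINITIALIZED)
 *		spa_remove(spa);
 *	mutex_exit(&spa_namespace_lock);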
849 */ 850void 851spa_remove(spa_t *spa) 852{ 853 spa_config_dirent_t *dp; 854 855 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 856 ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); 857 ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0); 858 859 nvlist_free(spa->spa_config_splitting); 860 861 avl_remove(&spa_namespace_avl, spa); 862 cv_broadcast(&spa_namespace_cv); 863 864 if (spa->spa_root) { 865 spa_strfree(spa->spa_root); 866 spa_active_count--; 867 } 868 869 while ((dp = list_head(&spa->spa_config_list)) != NULL) { 870 list_remove(&spa->spa_config_list, dp); 871 if (dp->scd_path != NULL) 872 spa_strfree(dp->scd_path); 873 kmem_free(dp, sizeof (spa_config_dirent_t)); 874 } 875 876 for (int i = 0; i < spa->spa_alloc_count; i++) { 877 avl_destroy(&spa->spa_alloc_trees[i]); 878 mutex_destroy(&spa->spa_alloc_locks[i]); 879 } 880 kmem_free(spa->spa_alloc_locks, spa->spa_alloc_count * 881 sizeof (kmutex_t)); 882 kmem_free(spa->spa_alloc_trees, spa->spa_alloc_count * 883 sizeof (avl_tree_t)); 884 885 list_destroy(&spa->spa_config_list); 886 887 nvlist_free(spa->spa_label_features); 888 nvlist_free(spa->spa_load_info); 889 spa_config_set(spa, NULL); 890 891#ifdef illumos 892 mutex_enter(&cpu_lock); 893 if (spa->spa_deadman_cycid != CYCLIC_NONE) 894 cyclic_remove(spa->spa_deadman_cycid); 895 mutex_exit(&cpu_lock); 896 spa->spa_deadman_cycid = CYCLIC_NONE; 897#else /* !illumos */ 898#ifdef _KERNEL 899 callout_drain(&spa->spa_deadman_cycid); 900 taskqueue_drain(taskqueue_thread, &spa->spa_deadman_task); 901#endif 902#endif 903 904 refcount_destroy(&spa->spa_refcount); 905 906 spa_config_lock_destroy(spa); 907 908 for (int t = 0; t < TXG_SIZE; t++) 909 bplist_destroy(&spa->spa_free_bplist[t]); 910 911 zio_checksum_templates_free(spa); 912 913 cv_destroy(&spa->spa_async_cv); 914 cv_destroy(&spa->spa_evicting_os_cv); 915 cv_destroy(&spa->spa_proc_cv); 916 cv_destroy(&spa->spa_scrub_io_cv); 917 cv_destroy(&spa->spa_suspend_cv); 918 919 mutex_destroy(&spa->spa_async_lock); 920 mutex_destroy(&spa->spa_errlist_lock); 921 mutex_destroy(&spa->spa_errlog_lock); 922 mutex_destroy(&spa->spa_evicting_os_lock); 923 mutex_destroy(&spa->spa_history_lock); 924 mutex_destroy(&spa->spa_proc_lock); 925 mutex_destroy(&spa->spa_props_lock); 926 mutex_destroy(&spa->spa_cksum_tmpls_lock); 927 mutex_destroy(&spa->spa_scrub_lock); 928 mutex_destroy(&spa->spa_suspend_lock); 929 mutex_destroy(&spa->spa_vdev_top_lock); 930 931 kmem_free(spa, sizeof (spa_t)); 932} 933 934/* 935 * Given a pool, return the next pool in the namespace, or NULL if there is 936 * none. If 'prev' is NULL, return the first pool. 937 */ 938spa_t * 939spa_next(spa_t *prev) 940{ 941 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 942 943 if (prev) 944 return (AVL_NEXT(&spa_namespace_avl, prev)); 945 else 946 return (avl_first(&spa_namespace_avl)); 947} 948 949/* 950 * ========================================================================== 951 * SPA refcount functions 952 * ========================================================================== 953 */ 954 955/* 956 * Add a reference to the given spa_t. Must have at least one reference, or 957 * have the namespace lock held. 958 */ 959void 960spa_open_ref(spa_t *spa, void *tag) 961{ 962 ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref || 963 MUTEX_HELD(&spa_namespace_lock)); 964 (void) refcount_add(&spa->spa_refcount, tag); 965} 966 967/* 968 * Remove a reference to the given spa_t. Must have at least one reference, or 969 * have the namespace lock held. 
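 *
 * References are tag-based: a hold and its release should use the same tag.
 * An illustrative sketch (spa_rename() below uses the same tag pairing):
 *
 *	if ((err = spa_open(name, &spa, FTAG)) == 0) {
 *		... use the pool ...
 *		spa_close(spa, FTAG);
 *	}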
970 */ 971void 972spa_close(spa_t *spa, void *tag) 973{ 974 ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref || 975 MUTEX_HELD(&spa_namespace_lock)); 976 (void) refcount_remove(&spa->spa_refcount, tag); 977} 978 979/* 980 * Remove a reference to the given spa_t held by a dsl dir that is 981 * being asynchronously released. Async releases occur from a taskq 982 * performing eviction of dsl datasets and dirs. The namespace lock 983 * isn't held and the hold by the object being evicted may contribute to 984 * spa_minref (e.g. dataset or directory released during pool export), 985 * so the asserts in spa_close() do not apply. 986 */ 987void 988spa_async_close(spa_t *spa, void *tag) 989{ 990 (void) refcount_remove(&spa->spa_refcount, tag); 991} 992 993/* 994 * Check to see if the spa refcount is zero. Must be called with 995 * spa_namespace_lock held. We really compare against spa_minref, which is the 996 * number of references acquired when opening a pool 997 */ 998boolean_t 999spa_refcount_zero(spa_t *spa) 1000{ 1001 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1002 1003 return (refcount_count(&spa->spa_refcount) == spa->spa_minref); 1004} 1005 1006/* 1007 * ========================================================================== 1008 * SPA spare and l2cache tracking 1009 * ========================================================================== 1010 */ 1011 1012/* 1013 * Hot spares and cache devices are tracked using the same code below, 1014 * for 'auxiliary' devices. 1015 */ 1016 1017typedef struct spa_aux { 1018 uint64_t aux_guid; 1019 uint64_t aux_pool; 1020 avl_node_t aux_avl; 1021 int aux_count; 1022} spa_aux_t; 1023 1024static int 1025spa_aux_compare(const void *a, const void *b) 1026{ 1027 const spa_aux_t *sa = a; 1028 const spa_aux_t *sb = b; 1029 1030 if (sa->aux_guid < sb->aux_guid) 1031 return (-1); 1032 else if (sa->aux_guid > sb->aux_guid) 1033 return (1); 1034 else 1035 return (0); 1036} 1037 1038void 1039spa_aux_add(vdev_t *vd, avl_tree_t *avl) 1040{ 1041 avl_index_t where; 1042 spa_aux_t search; 1043 spa_aux_t *aux; 1044 1045 search.aux_guid = vd->vdev_guid; 1046 if ((aux = avl_find(avl, &search, &where)) != NULL) { 1047 aux->aux_count++; 1048 } else { 1049 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); 1050 aux->aux_guid = vd->vdev_guid; 1051 aux->aux_count = 1; 1052 avl_insert(avl, aux, where); 1053 } 1054} 1055 1056void 1057spa_aux_remove(vdev_t *vd, avl_tree_t *avl) 1058{ 1059 spa_aux_t search; 1060 spa_aux_t *aux; 1061 avl_index_t where; 1062 1063 search.aux_guid = vd->vdev_guid; 1064 aux = avl_find(avl, &search, &where); 1065 1066 ASSERT(aux != NULL); 1067 1068 if (--aux->aux_count == 0) { 1069 avl_remove(avl, aux); 1070 kmem_free(aux, sizeof (spa_aux_t)); 1071 } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { 1072 aux->aux_pool = 0ULL; 1073 } 1074} 1075 1076boolean_t 1077spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) 1078{ 1079 spa_aux_t search, *found; 1080 1081 search.aux_guid = guid; 1082 found = avl_find(avl, &search, NULL); 1083 1084 if (pool) { 1085 if (found) 1086 *pool = found->aux_pool; 1087 else 1088 *pool = 0ULL; 1089 } 1090 1091 if (refcnt) { 1092 if (found) 1093 *refcnt = found->aux_count; 1094 else 1095 *refcnt = 0; 1096 } 1097 1098 return (found != NULL); 1099} 1100 1101void 1102spa_aux_activate(vdev_t *vd, avl_tree_t *avl) 1103{ 1104 spa_aux_t search, *found; 1105 avl_index_t where; 1106 1107 search.aux_guid = vd->vdev_guid; 1108 found = avl_find(avl, &search, &where); 1109 ASSERT(found != NULL); 1110 
ASSERT(found->aux_pool == 0ULL); 1111 1112 found->aux_pool = spa_guid(vd->vdev_spa); 1113} 1114 1115/* 1116 * Spares are tracked globally due to the following constraints: 1117 * 1118 * - A spare may be part of multiple pools. 1119 * - A spare may be added to a pool even if it's actively in use within 1120 * another pool. 1121 * - A spare in use in any pool can only be the source of a replacement if 1122 * the target is a spare in the same pool. 1123 * 1124 * We keep track of all spares on the system through the use of a reference 1125 * counted AVL tree. When a vdev is added as a spare, or used as a replacement 1126 * spare, then we bump the reference count in the AVL tree. In addition, we set 1127 * the 'vdev_isspare' member to indicate that the device is a spare (active or 1128 * inactive). When a spare is made active (used to replace a device in the 1129 * pool), we also keep track of which pool its been made a part of. 1130 * 1131 * The 'spa_spare_lock' protects the AVL tree. These functions are normally 1132 * called under the spa_namespace lock as part of vdev reconfiguration. The 1133 * separate spare lock exists for the status query path, which does not need to 1134 * be completely consistent with respect to other vdev configuration changes. 1135 */ 1136 1137static int 1138spa_spare_compare(const void *a, const void *b) 1139{ 1140 return (spa_aux_compare(a, b)); 1141} 1142 1143void 1144spa_spare_add(vdev_t *vd) 1145{ 1146 mutex_enter(&spa_spare_lock); 1147 ASSERT(!vd->vdev_isspare); 1148 spa_aux_add(vd, &spa_spare_avl); 1149 vd->vdev_isspare = B_TRUE; 1150 mutex_exit(&spa_spare_lock); 1151} 1152 1153void 1154spa_spare_remove(vdev_t *vd) 1155{ 1156 mutex_enter(&spa_spare_lock); 1157 ASSERT(vd->vdev_isspare); 1158 spa_aux_remove(vd, &spa_spare_avl); 1159 vd->vdev_isspare = B_FALSE; 1160 mutex_exit(&spa_spare_lock); 1161} 1162 1163boolean_t 1164spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) 1165{ 1166 boolean_t found; 1167 1168 mutex_enter(&spa_spare_lock); 1169 found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); 1170 mutex_exit(&spa_spare_lock); 1171 1172 return (found); 1173} 1174 1175void 1176spa_spare_activate(vdev_t *vd) 1177{ 1178 mutex_enter(&spa_spare_lock); 1179 ASSERT(vd->vdev_isspare); 1180 spa_aux_activate(vd, &spa_spare_avl); 1181 mutex_exit(&spa_spare_lock); 1182} 1183 1184/* 1185 * Level 2 ARC devices are tracked globally for the same reasons as spares. 1186 * Cache devices currently only support one pool per cache device, and so 1187 * for these devices the aux reference count is currently unused beyond 1. 
1188 */ 1189 1190static int 1191spa_l2cache_compare(const void *a, const void *b) 1192{ 1193 return (spa_aux_compare(a, b)); 1194} 1195 1196void 1197spa_l2cache_add(vdev_t *vd) 1198{ 1199 mutex_enter(&spa_l2cache_lock); 1200 ASSERT(!vd->vdev_isl2cache); 1201 spa_aux_add(vd, &spa_l2cache_avl); 1202 vd->vdev_isl2cache = B_TRUE; 1203 mutex_exit(&spa_l2cache_lock); 1204} 1205 1206void 1207spa_l2cache_remove(vdev_t *vd) 1208{ 1209 mutex_enter(&spa_l2cache_lock); 1210 ASSERT(vd->vdev_isl2cache); 1211 spa_aux_remove(vd, &spa_l2cache_avl); 1212 vd->vdev_isl2cache = B_FALSE; 1213 mutex_exit(&spa_l2cache_lock); 1214} 1215 1216boolean_t 1217spa_l2cache_exists(uint64_t guid, uint64_t *pool) 1218{ 1219 boolean_t found; 1220 1221 mutex_enter(&spa_l2cache_lock); 1222 found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); 1223 mutex_exit(&spa_l2cache_lock); 1224 1225 return (found); 1226} 1227 1228void 1229spa_l2cache_activate(vdev_t *vd) 1230{ 1231 mutex_enter(&spa_l2cache_lock); 1232 ASSERT(vd->vdev_isl2cache); 1233 spa_aux_activate(vd, &spa_l2cache_avl); 1234 mutex_exit(&spa_l2cache_lock); 1235} 1236 1237/* 1238 * ========================================================================== 1239 * SPA vdev locking 1240 * ========================================================================== 1241 */ 1242 1243/* 1244 * Lock the given spa_t for the purpose of adding or removing a vdev. 1245 * Grabs the global spa_namespace_lock plus the spa config lock for writing. 1246 * It returns the next transaction group for the spa_t. 1247 */ 1248uint64_t 1249spa_vdev_enter(spa_t *spa) 1250{ 1251 mutex_enter(&spa->spa_vdev_top_lock); 1252 mutex_enter(&spa_namespace_lock); 1253 return (spa_vdev_config_enter(spa)); 1254} 1255 1256/* 1257 * Internal implementation for spa_vdev_enter(). Used when a vdev 1258 * operation requires multiple syncs (i.e. removing a device) while 1259 * keeping the spa_namespace_lock held. 1260 */ 1261uint64_t 1262spa_vdev_config_enter(spa_t *spa) 1263{ 1264 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1265 1266 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1267 1268 return (spa_last_synced_txg(spa) + 1); 1269} 1270 1271/* 1272 * Used in combination with spa_vdev_config_enter() to allow the syncing 1273 * of multiple transactions without releasing the spa_namespace_lock. 1274 */ 1275void 1276spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) 1277{ 1278 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1279 1280 int config_changed = B_FALSE; 1281 1282 ASSERT(txg > spa_last_synced_txg(spa)); 1283 1284 spa->spa_pending_vdev = NULL; 1285 1286 /* 1287 * Reassess the DTLs. 1288 */ 1289 vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); 1290 1291 if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { 1292 config_changed = B_TRUE; 1293 spa->spa_config_generation++; 1294 } 1295 1296 /* 1297 * Verify the metaslab classes. 1298 */ 1299 ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); 1300 ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); 1301 1302 spa_config_exit(spa, SCL_ALL, spa); 1303 1304 /* 1305 * Panic the system if the specified tag requires it. This 1306 * is useful for ensuring that configurations are updated 1307 * transactionally. 1308 */ 1309 if (zio_injection_enabled) 1310 zio_handle_panic_injection(spa, tag, 0); 1311 1312 /* 1313 * Note: this txg_wait_synced() is important because it ensures 1314 * that there won't be more than one config change per txg. 1315 * This allows us to use the txg as the generation number. 
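 *
 * (For reference: the txg used here is the one handed out by
 * spa_vdev_config_enter() above, i.e. spa_last_synced_txg() + 1.  A typical
 * caller, sketched rather than quoted from any one function, does
 * txg = spa_vdev_enter(spa); ... change the vdev config ...;
 * return (spa_vdev_exit(spa, vd, txg, error)); so each vdev change is tied
 * to exactly one txg.)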
1316 */ 1317 if (error == 0) 1318 txg_wait_synced(spa->spa_dsl_pool, txg); 1319 1320 if (vd != NULL) { 1321 ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); 1322 spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); 1323 vdev_free(vd); 1324 spa_config_exit(spa, SCL_ALL, spa); 1325 } 1326 1327 /* 1328 * If the config changed, update the config cache. 1329 */ 1330 if (config_changed) 1331 spa_write_cachefile(spa, B_FALSE, B_TRUE); 1332} 1333 1334/* 1335 * Unlock the spa_t after adding or removing a vdev. Besides undoing the 1336 * locking of spa_vdev_enter(), we also want make sure the transactions have 1337 * synced to disk, and then update the global configuration cache with the new 1338 * information. 1339 */ 1340int 1341spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) 1342{ 1343 spa_vdev_config_exit(spa, vd, txg, error, FTAG); 1344 mutex_exit(&spa_namespace_lock); 1345 mutex_exit(&spa->spa_vdev_top_lock); 1346 1347 return (error); 1348} 1349 1350/* 1351 * Lock the given spa_t for the purpose of changing vdev state. 1352 */ 1353void 1354spa_vdev_state_enter(spa_t *spa, int oplocks) 1355{ 1356 int locks = SCL_STATE_ALL | oplocks; 1357 1358 /* 1359 * Root pools may need to read of the underlying devfs filesystem 1360 * when opening up a vdev. Unfortunately if we're holding the 1361 * SCL_ZIO lock it will result in a deadlock when we try to issue 1362 * the read from the root filesystem. Instead we "prefetch" 1363 * the associated vnodes that we need prior to opening the 1364 * underlying devices and cache them so that we can prevent 1365 * any I/O when we are doing the actual open. 1366 */ 1367 if (spa_is_root(spa)) { 1368 int low = locks & ~(SCL_ZIO - 1); 1369 int high = locks & ~low; 1370 1371 spa_config_enter(spa, high, spa, RW_WRITER); 1372 vdev_hold(spa->spa_root_vdev); 1373 spa_config_enter(spa, low, spa, RW_WRITER); 1374 } else { 1375 spa_config_enter(spa, locks, spa, RW_WRITER); 1376 } 1377 spa->spa_vdev_locks = locks; 1378} 1379 1380int 1381spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) 1382{ 1383 boolean_t config_changed = B_FALSE; 1384 1385 if (vd != NULL || error == 0) 1386 vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev, 1387 0, 0, B_FALSE); 1388 1389 if (vd != NULL) { 1390 vdev_state_dirty(vd->vdev_top); 1391 config_changed = B_TRUE; 1392 spa->spa_config_generation++; 1393 } 1394 1395 if (spa_is_root(spa)) 1396 vdev_rele(spa->spa_root_vdev); 1397 1398 ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); 1399 spa_config_exit(spa, spa->spa_vdev_locks, spa); 1400 1401 /* 1402 * If anything changed, wait for it to sync. This ensures that, 1403 * from the system administrator's perspective, zpool(1M) commands 1404 * are synchronous. This is important for things like zpool offline: 1405 * when the command completes, you expect no further I/O from ZFS. 1406 */ 1407 if (vd != NULL) 1408 txg_wait_synced(spa->spa_dsl_pool, 0); 1409 1410 /* 1411 * If the config changed, update the config cache. 
1412 */ 1413 if (config_changed) { 1414 mutex_enter(&spa_namespace_lock); 1415 spa_write_cachefile(spa, B_FALSE, B_TRUE); 1416 mutex_exit(&spa_namespace_lock); 1417 } 1418 1419 return (error); 1420} 1421 1422/* 1423 * ========================================================================== 1424 * Miscellaneous functions 1425 * ========================================================================== 1426 */ 1427 1428void 1429spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) 1430{ 1431 if (!nvlist_exists(spa->spa_label_features, feature)) { 1432 fnvlist_add_boolean(spa->spa_label_features, feature); 1433 /* 1434 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't 1435 * dirty the vdev config because lock SCL_CONFIG is not held. 1436 * Thankfully, in this case we don't need to dirty the config 1437 * because it will be written out anyway when we finish 1438 * creating the pool. 1439 */ 1440 if (tx->tx_txg != TXG_INITIAL) 1441 vdev_config_dirty(spa->spa_root_vdev); 1442 } 1443} 1444 1445void 1446spa_deactivate_mos_feature(spa_t *spa, const char *feature) 1447{ 1448 if (nvlist_remove_all(spa->spa_label_features, feature) == 0) 1449 vdev_config_dirty(spa->spa_root_vdev); 1450} 1451 1452/* 1453 * Rename a spa_t. 1454 */ 1455int 1456spa_rename(const char *name, const char *newname) 1457{ 1458 spa_t *spa; 1459 int err; 1460 1461 /* 1462 * Lookup the spa_t and grab the config lock for writing. We need to 1463 * actually open the pool so that we can sync out the necessary labels. 1464 * It's OK to call spa_open() with the namespace lock held because we 1465 * allow recursive calls for other reasons. 1466 */ 1467 mutex_enter(&spa_namespace_lock); 1468 if ((err = spa_open(name, &spa, FTAG)) != 0) { 1469 mutex_exit(&spa_namespace_lock); 1470 return (err); 1471 } 1472 1473 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1474 1475 avl_remove(&spa_namespace_avl, spa); 1476 (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); 1477 avl_add(&spa_namespace_avl, spa); 1478 1479 /* 1480 * Sync all labels to disk with the new names by marking the root vdev 1481 * dirty and waiting for it to sync. It will pick up the new pool name 1482 * during the sync. 1483 */ 1484 vdev_config_dirty(spa->spa_root_vdev); 1485 1486 spa_config_exit(spa, SCL_ALL, FTAG); 1487 1488 txg_wait_synced(spa->spa_dsl_pool, 0); 1489 1490 /* 1491 * Sync the updated config cache. 1492 */ 1493 spa_write_cachefile(spa, B_FALSE, B_TRUE); 1494 1495 spa_close(spa, FTAG); 1496 1497 mutex_exit(&spa_namespace_lock); 1498 1499 return (0); 1500} 1501 1502/* 1503 * Return the spa_t associated with given pool_guid, if it exists. If 1504 * device_guid is non-zero, determine whether the pool exists *and* contains 1505 * a device with the specified device_guid. 1506 */ 1507spa_t * 1508spa_by_guid(uint64_t pool_guid, uint64_t device_guid) 1509{ 1510 spa_t *spa; 1511 avl_tree_t *t = &spa_namespace_avl; 1512 1513 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1514 1515 for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { 1516 if (spa->spa_state == POOL_STATE_UNINITIALIZED) 1517 continue; 1518 if (spa->spa_root_vdev == NULL) 1519 continue; 1520 if (spa_guid(spa) == pool_guid) { 1521 if (device_guid == 0) 1522 break; 1523 1524 if (vdev_lookup_by_guid(spa->spa_root_vdev, 1525 device_guid) != NULL) 1526 break; 1527 1528 /* 1529 * Check any devices we may be in the process of adding. 
1530 */ 1531 if (spa->spa_pending_vdev) { 1532 if (vdev_lookup_by_guid(spa->spa_pending_vdev, 1533 device_guid) != NULL) 1534 break; 1535 } 1536 } 1537 } 1538 1539 return (spa); 1540} 1541 1542/* 1543 * Determine whether a pool with the given pool_guid exists. 1544 */ 1545boolean_t 1546spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) 1547{ 1548 return (spa_by_guid(pool_guid, device_guid) != NULL); 1549} 1550 1551char * 1552spa_strdup(const char *s) 1553{ 1554 size_t len; 1555 char *new; 1556 1557 len = strlen(s); 1558 new = kmem_alloc(len + 1, KM_SLEEP); 1559 bcopy(s, new, len); 1560 new[len] = '\0'; 1561 1562 return (new); 1563} 1564 1565void 1566spa_strfree(char *s) 1567{ 1568 kmem_free(s, strlen(s) + 1); 1569} 1570 1571uint64_t 1572spa_get_random(uint64_t range) 1573{ 1574 uint64_t r; 1575 1576 ASSERT(range != 0); 1577 1578 (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); 1579 1580 return (r % range); 1581} 1582 1583uint64_t 1584spa_generate_guid(spa_t *spa) 1585{ 1586 uint64_t guid = spa_get_random(-1ULL); 1587 1588 if (spa != NULL) { 1589 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) 1590 guid = spa_get_random(-1ULL); 1591 } else { 1592 while (guid == 0 || spa_guid_exists(guid, 0)) 1593 guid = spa_get_random(-1ULL); 1594 } 1595 1596 return (guid); 1597} 1598 1599void 1600snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) 1601{ 1602 char type[256]; 1603 char *checksum = NULL; 1604 char *compress = NULL; 1605 1606 if (bp != NULL) { 1607 if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { 1608 dmu_object_byteswap_t bswap = 1609 DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); 1610 (void) snprintf(type, sizeof (type), "bswap %s %s", 1611 DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? 1612 "metadata" : "data", 1613 dmu_ot_byteswap[bswap].ob_name); 1614 } else { 1615 (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, 1616 sizeof (type)); 1617 } 1618 if (!BP_IS_EMBEDDED(bp)) { 1619 checksum = 1620 zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; 1621 } 1622 compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; 1623 } 1624 1625 SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, 1626 compress); 1627} 1628 1629void 1630spa_freeze(spa_t *spa) 1631{ 1632 uint64_t freeze_txg = 0; 1633 1634 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 1635 if (spa->spa_freeze_txg == UINT64_MAX) { 1636 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; 1637 spa->spa_freeze_txg = freeze_txg; 1638 } 1639 spa_config_exit(spa, SCL_ALL, FTAG); 1640 if (freeze_txg != 0) 1641 txg_wait_synced(spa_get_dsl(spa), freeze_txg); 1642} 1643 1644void 1645zfs_panic_recover(const char *fmt, ...) 1646{ 1647 va_list adx; 1648 1649 va_start(adx, fmt); 1650 vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); 1651 va_end(adx); 1652} 1653 1654/* 1655 * This is a stripped-down version of strtoull, suitable only for converting 1656 * lowercase hexadecimal numbers that don't overflow. 
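 *
 * For example, zfs_strtonum("1a2b", &end) returns 0x1a2b (6699) and leaves
 * end pointing at the terminating NUL; parsing stops at the first character
 * that is not 0-9 or a-f.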
1657 */ 1658uint64_t 1659zfs_strtonum(const char *str, char **nptr) 1660{ 1661 uint64_t val = 0; 1662 char c; 1663 int digit; 1664 1665 while ((c = *str) != '\0') { 1666 if (c >= '0' && c <= '9') 1667 digit = c - '0'; 1668 else if (c >= 'a' && c <= 'f') 1669 digit = 10 + c - 'a'; 1670 else 1671 break; 1672 1673 val *= 16; 1674 val += digit; 1675 1676 str++; 1677 } 1678 1679 if (nptr) 1680 *nptr = (char *)str; 1681 1682 return (val); 1683} 1684 1685/* 1686 * ========================================================================== 1687 * Accessor functions 1688 * ========================================================================== 1689 */ 1690 1691boolean_t 1692spa_shutting_down(spa_t *spa) 1693{ 1694 return (spa->spa_async_suspended); 1695} 1696 1697dsl_pool_t * 1698spa_get_dsl(spa_t *spa) 1699{ 1700 return (spa->spa_dsl_pool); 1701} 1702 1703boolean_t 1704spa_is_initializing(spa_t *spa) 1705{ 1706 return (spa->spa_is_initializing); 1707} 1708 1709boolean_t 1710spa_indirect_vdevs_loaded(spa_t *spa) 1711{ 1712 return (spa->spa_indirect_vdevs_loaded); 1713} 1714 1715blkptr_t * 1716spa_get_rootblkptr(spa_t *spa) 1717{ 1718 return (&spa->spa_ubsync.ub_rootbp); 1719} 1720 1721void 1722spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) 1723{ 1724 spa->spa_uberblock.ub_rootbp = *bp; 1725} 1726 1727void 1728spa_altroot(spa_t *spa, char *buf, size_t buflen) 1729{ 1730 if (spa->spa_root == NULL) 1731 buf[0] = '\0'; 1732 else 1733 (void) strncpy(buf, spa->spa_root, buflen); 1734} 1735 1736int 1737spa_sync_pass(spa_t *spa) 1738{ 1739 return (spa->spa_sync_pass); 1740} 1741 1742char * 1743spa_name(spa_t *spa) 1744{ 1745 return (spa->spa_name); 1746} 1747 1748uint64_t 1749spa_guid(spa_t *spa) 1750{ 1751 dsl_pool_t *dp = spa_get_dsl(spa); 1752 uint64_t guid; 1753 1754 /* 1755 * If we fail to parse the config during spa_load(), we can go through 1756 * the error path (which posts an ereport) and end up here with no root 1757 * vdev. We stash the original pool guid in 'spa_config_guid' to handle 1758 * this case. 1759 */ 1760 if (spa->spa_root_vdev == NULL) 1761 return (spa->spa_config_guid); 1762 1763 guid = spa->spa_last_synced_guid != 0 ? 1764 spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; 1765 1766 /* 1767 * Return the most recently synced out guid unless we're 1768 * in syncing context. 1769 */ 1770 if (dp && dsl_pool_sync_context(dp)) 1771 return (spa->spa_root_vdev->vdev_guid); 1772 else 1773 return (guid); 1774} 1775 1776uint64_t 1777spa_load_guid(spa_t *spa) 1778{ 1779 /* 1780 * This is a GUID that exists solely as a reference for the 1781 * purposes of the arc. It is generated at load time, and 1782 * is never written to persistent storage. 1783 */ 1784 return (spa->spa_load_guid); 1785} 1786 1787uint64_t 1788spa_last_synced_txg(spa_t *spa) 1789{ 1790 return (spa->spa_ubsync.ub_txg); 1791} 1792 1793uint64_t 1794spa_first_txg(spa_t *spa) 1795{ 1796 return (spa->spa_first_txg); 1797} 1798 1799uint64_t 1800spa_syncing_txg(spa_t *spa) 1801{ 1802 return (spa->spa_syncing_txg); 1803} 1804 1805/* 1806 * Return the last txg where data can be dirtied. The final txgs 1807 * will be used to just clear out any deferred frees that remain. 
1808 */ 1809uint64_t 1810spa_final_dirty_txg(spa_t *spa) 1811{ 1812 return (spa->spa_final_txg - TXG_DEFER_SIZE); 1813} 1814 1815pool_state_t 1816spa_state(spa_t *spa) 1817{ 1818 return (spa->spa_state); 1819} 1820 1821spa_load_state_t 1822spa_load_state(spa_t *spa) 1823{ 1824 return (spa->spa_load_state); 1825} 1826 1827uint64_t 1828spa_freeze_txg(spa_t *spa) 1829{ 1830 return (spa->spa_freeze_txg); 1831} 1832 1833/* ARGSUSED */ 1834uint64_t 1835spa_get_worst_case_asize(spa_t *spa, uint64_t lsize) 1836{ 1837 return (lsize * spa_asize_inflation); 1838} 1839 1840/* 1841 * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%), 1842 * or at least 128MB, unless that would cause it to be more than half the 1843 * pool size. 1844 * 1845 * See the comment above spa_slop_shift for details. 1846 */ 1847uint64_t 1848spa_get_slop_space(spa_t *spa) 1849{ 1850 uint64_t space = spa_get_dspace(spa); 1851 return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))); 1852} 1853 1854uint64_t 1855spa_get_dspace(spa_t *spa) 1856{ 1857 return (spa->spa_dspace); 1858} 1859 1860uint64_t 1861spa_get_checkpoint_space(spa_t *spa) 1862{ 1863 return (spa->spa_checkpoint_info.sci_dspace); 1864} 1865 1866void 1867spa_update_dspace(spa_t *spa) 1868{ 1869 spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + 1870 ddt_get_dedup_dspace(spa); 1871 if (spa->spa_vdev_removal != NULL) { 1872 /* 1873 * We can't allocate from the removing device, so 1874 * subtract its size. This prevents the DMU/DSL from 1875 * filling up the (now smaller) pool while we are in the 1876 * middle of removing the device. 1877 * 1878 * Note that the DMU/DSL doesn't actually know or care 1879 * how much space is allocated (it does its own tracking 1880 * of how much space has been logically used). So it 1881 * doesn't matter that the data we are moving may be 1882 * allocated twice (on the old device and the new 1883 * device). 1884 */ 1885 spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); 1886 vdev_t *vd = 1887 vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1888 spa->spa_dspace -= spa_deflate(spa) ? 1889 vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space; 1890 spa_config_exit(spa, SCL_VDEV, FTAG); 1891 } 1892} 1893 1894/* 1895 * Return the failure mode that has been set to this pool. The default 1896 * behavior will be to block all I/Os when a complete failure occurs. 
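 *
 * (That default corresponds to the pool property failmode=wait; the
 * property may also be set to "continue" or "panic".)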
uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

uint64_t
spa_get_checkpoint_space(spa_t *spa)
{
	return (spa->spa_checkpoint_info.sci_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
	if (spa->spa_vdev_removal != NULL) {
		/*
		 * We can't allocate from the removing device, so
		 * subtract its size.  This prevents the DMU/DSL from
		 * filling up the (now smaller) pool while we are in the
		 * middle of removing the device.
		 *
		 * Note that the DMU/DSL doesn't actually know or care
		 * how much space is allocated (it does its own tracking
		 * of how much space has been logically used).  So it
		 * doesn't matter that the data we are moving may be
		 * allocated twice (on the old device and the new
		 * device).
		 */
		spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
		vdev_t *vd =
		    vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
		spa->spa_dspace -= spa_deflate(spa) ?
		    vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
		spa_config_exit(spa, SCL_VDEV, FTAG);
	}
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior is to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_insert_head(&spa->spa_evicting_os_list, os);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	list_remove(&spa->spa_evicting_os_list, os);
	cv_broadcast(&spa->spa_evicting_os_cv);
	mutex_exit(&spa->spa_evicting_os_lock);
}

void
spa_evicting_os_wait(spa_t *spa)
{
	mutex_enter(&spa->spa_evicting_os_lock);
	while (!list_is_empty(&spa->spa_evicting_os_list))
		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
	mutex_exit(&spa->spa_evicting_os_lock);

	dmu_buf_user_evict_wait();
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		uint64_t vdev = DVA_GET_VDEV(dva);
		vdev_t *vd = vdev_lookup_top(spa, vdev);
		if (vd == NULL) {
			panic(
			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
			    (u_longlong_t)vdev, (u_longlong_t)asize);
		}
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}
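/*
 * Sum the deflated size of every DVA in a block pointer.  The _sync variant
 * below assumes the caller already holds one of the config locks (see the
 * ASSERT in dva_get_dsize_sync()); bp_get_dsize() acquires SCL_VDEV as
 * reader on the caller's behalf before doing the same computation.
 */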
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif	/* illumos */
	refcount_sysinit();
	unique_init();
	range_tree_init();
	metaslab_alloc_trace_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	vdev_file_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
	scan_init();
	dsl_scan_global_init();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_file_fini();
	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	metaslab_alloc_trace_fini();
	range_tree_fini();
	unique_fini();
	refcount_fini();
	scan_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs.  No locking needed.
 * It's not a problem if the wrong answer is returned, as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE) && spa->spa_trust_config);
}
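/*
 * Note that spa_writeable() requires more than FWRITE: until the pool's
 * configuration is trusted (see spa_trust_config() below), the pool is
 * treated as not writeable, even though it may have been opened for write.
 */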
/*
 * Returns true if there is a pending sync task in any of the current
 * syncing txg, the current quiescing txg, or the current open txg.
 */
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks) ||
	    !txg_all_lists_empty(&spa->spa_dsl_pool->dp_early_sync_tasks));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stats per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
		spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
	else
		spa->spa_scan_pass_scrub_pause = 0;
	spa->spa_scan_pass_scrub_spent_paused = 0;
	spa->spa_scan_pass_exam = 0;
	spa->spa_scan_pass_issued = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_state = scn->scn_phys.scn_state;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_issued =
	    scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;
	ps->pss_pass_issued = spa->spa_scan_pass_issued;
	ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
	ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}

int
spa_maxblocksize(spa_t *spa)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SPA_MAXBLOCKSIZE);
	else
		return (SPA_OLD_MAXBLOCKSIZE);
}
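/*
 * In other words, assuming the usual definitions of SPA_MAXBLOCKSIZE (16MB)
 * and SPA_OLD_MAXBLOCKSIZE (128KB), pools with the large_blocks feature
 * enabled may use blocks up to 16MB, while all other pools are limited to
 * the legacy 128KB maximum.
 */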
/*
 * Returns the txg in which the last device removal completed.  No indirect
 * mappings have been added since this txg.
 */
uint64_t
spa_get_last_removal_txg(spa_t *spa)
{
	uint64_t vdevid;
	uint64_t ret = -1ULL;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/*
	 * sr_prev_indirect_vdev is only modified while holding all the
	 * config locks, so it is sufficient to hold SCL_VDEV as reader when
	 * examining it.
	 */
	vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;

	while (vdevid != -1ULL) {
		vdev_t *vd = vdev_lookup_top(spa, vdevid);
		vdev_indirect_births_t *vib = vd->vdev_indirect_births;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

		/*
		 * If the removal did not remap any data, we don't care.
		 */
		if (vdev_indirect_births_count(vib) != 0) {
			ret = vdev_indirect_births_last_entry_txg(vib);
			break;
		}

		vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_VDEV, FTAG);

	IMPLY(ret != -1ULL,
	    spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));

	return (ret);
}

boolean_t
spa_trust_config(spa_t *spa)
{
	return (spa->spa_trust_config);
}

uint64_t
spa_missing_tvds_allowed(spa_t *spa)
{
	return (spa->spa_missing_tvds_allowed);
}

void
spa_set_missing_tvds(spa_t *spa, uint64_t missing)
{
	spa->spa_missing_tvds = missing;
}

boolean_t
spa_top_vdevs_spacemap_addressable(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	for (uint64_t c = 0; c < rvd->vdev_children; c++) {
		if (!vdev_is_spacemap_addressable(rvd->vdev_child[c]))
			return (B_FALSE);
	}
	return (B_TRUE);
}

boolean_t
spa_has_checkpoint(spa_t *spa)
{
	return (spa->spa_checkpoint_txg != 0);
}

boolean_t
spa_importing_readonly_checkpoint(spa_t *spa)
{
	return ((spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT) &&
	    spa->spa_mode == FREAD);
}

uint64_t
spa_min_claim_txg(spa_t *spa)
{
	uint64_t checkpoint_txg = spa->spa_uberblock.ub_checkpoint_txg;

	if (checkpoint_txg != 0)
		return (checkpoint_txg + 1);

	return (spa->spa_first_txg);
}

/*
 * If there is a checkpoint, async destroys may consume more space from
 * the pool instead of freeing it.  In an attempt to save the pool from
 * getting suspended when it is about to run out of space, we stop
 * processing async destroys.
 */
boolean_t
spa_suspend_async_destroy(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);

	uint64_t unreserved = dsl_pool_unreserved_space(dp,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);
	uint64_t used = dsl_dir_phys(dp->dp_root_dir)->dd_used_bytes;
	uint64_t avail = (unreserved > used) ? (unreserved - used) : 0;

	if (spa_has_checkpoint(spa) && avail == 0)
		return (B_TRUE);

	return (B_FALSE);
}