spa_misc.c revision 290757
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
25 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright 2013 Saso Kiselkov. All rights reserved.
28 */
29
30#include <sys/zfs_context.h>
31#include <sys/spa_impl.h>
32#include <sys/spa_boot.h>
33#include <sys/zio.h>
34#include <sys/zio_checksum.h>
35#include <sys/zio_compress.h>
36#include <sys/dmu.h>
37#include <sys/dmu_tx.h>
38#include <sys/zap.h>
39#include <sys/zil.h>
40#include <sys/vdev_impl.h>
41#include <sys/metaslab.h>
42#include <sys/uberblock_impl.h>
43#include <sys/txg.h>
44#include <sys/avl.h>
45#include <sys/unique.h>
46#include <sys/dsl_pool.h>
47#include <sys/dsl_dir.h>
48#include <sys/dsl_prop.h>
49#include <sys/dsl_scan.h>
50#include <sys/fs/zfs.h>
51#include <sys/metaslab_impl.h>
52#include <sys/arc.h>
53#include <sys/ddt.h>
54#include "zfs_prop.h"
55#include <sys/zfeature.h>
56
57/*
58 * SPA locking
59 *
60 * There are four basic locks for managing spa_t structures:
61 *
62 * spa_namespace_lock (global mutex)
63 *
64 *	This lock must be acquired to do any of the following:
65 *
66 *		- Lookup a spa_t by name
67 *		- Add or remove a spa_t from the namespace
68 *		- Increase spa_refcount from non-zero
69 *		- Check if spa_refcount is zero
70 *		- Rename a spa_t
71 *		- add/remove/attach/detach devices
72 *		- Held for the duration of create/destroy/import/export
73 *
74 *	It does not need to handle recursion.  A create or destroy may
75 *	reference objects (files or zvols) in other pools, but by
76 *	definition they must have an existing reference, and will never need
77 *	to lookup a spa_t by name.
78 *
79 * spa_refcount (per-spa refcount_t protected by mutex)
80 *
81 *	This reference count keeps track of any active users of the spa_t.  The
82 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
83 *	the refcount is never really 'zero' - opening a pool implicitly keeps
84 *	some references in the DMU.  Internally we check against spa_minref, but
85 *	present the image of a zero/non-zero value to consumers.
86 *
87 * spa_config_lock[] (per-spa array of rwlocks)
88 *
89 *	This protects the spa_t from config changes, and must be held in
90 *	the following circumstances:
91 *
92 *		- RW_READER to perform I/O to the spa
93 *		- RW_WRITER to change the vdev config
94 *
95 * The locking order is fairly straightforward:
96 *
97 *		spa_namespace_lock	->	spa_refcount
98 *
99 *	The namespace lock must be acquired to increase the refcount from 0
100 *	or to check if it is zero.
101 *
102 *		spa_refcount		->	spa_config_lock[]
103 *
104 *	There must be at least one valid reference on the spa_t to acquire
105 *	the config lock.
106 *
107 *		spa_namespace_lock	->	spa_config_lock[]
108 *
109 *	The namespace lock must always be taken before the config lock.
110 *
111 *
112 * The spa_namespace_lock can be acquired directly and is globally visible.
113 *
114 * The namespace is manipulated using the following functions, all of which
115 * require the spa_namespace_lock to be held.
116 *
117 *	spa_lookup()		Lookup a spa_t by name.
118 *
119 *	spa_add()		Create a new spa_t in the namespace.
120 *
121 *	spa_remove()		Remove a spa_t from the namespace.  This also
122 *				frees up any memory associated with the spa_t.
123 *
124 *	spa_next()		Returns the next spa_t in the system, or the
125 *				first if NULL is passed.
126 *
127 *	spa_evict_all()		Shutdown and remove all spa_t structures in
128 *				the system.
129 *
130 *	spa_guid_exists()	Determine whether a pool/device guid exists.
131 *
132 * The spa_refcount is manipulated using the following functions:
133 *
134 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
135 *				called with spa_namespace_lock held if the
136 *				refcount is currently zero.
137 *
138 *	spa_close()		Remove a reference from the spa_t.  This will
139 *				not free the spa_t or remove it from the
140 *				namespace.  No locking is required.
141 *
142 *	spa_refcount_zero()	Returns true if the refcount is currently
143 *				zero.  Must be called with spa_namespace_lock
144 *				held.
145 *
146 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
147 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
148 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
149 *
150 * To read the configuration, it suffices to hold one of these locks as reader.
151 * To modify the configuration, you must hold all locks as writer.  To modify
152 * vdev state without altering the vdev tree's topology (e.g. online/offline),
153 * you must hold SCL_STATE and SCL_ZIO as writer.
154 *
155 * We use these distinct config locks to avoid recursive lock entry.
156 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
157 * block allocations (SCL_ALLOC), which may require reading space maps
158 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
159 *
160 * The spa config locks cannot be normal rwlocks because we need the
161 * ability to hand off ownership.  For example, SCL_ZIO is acquired
162 * by the issuing thread and later released by an interrupt thread.
163 * They do, however, obey the usual write-wanted semantics to prevent
164 * writer (i.e. system administrator) starvation.
165 *
166 * The lock acquisition rules are as follows:
167 *
168 * SCL_CONFIG
169 *	Protects changes to the vdev tree topology, such as vdev
170 *	add/remove/attach/detach.  Protects the dirty config list
171 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
172 *
173 * SCL_STATE
174 *	Protects changes to pool state and vdev state, such as vdev
175 *	online/offline/fault/degrade/clear.  Protects the dirty state list
176 *	(spa_state_dirty_list) and global pool state (spa_state).
177 *
178 * SCL_ALLOC
179 *	Protects changes to metaslab groups and classes.
180 *	Held as reader by metaslab_alloc() and metaslab_claim().
181 *
182 * SCL_ZIO
183 *	Held by bp-level zios (those which have no io_vd upon entry)
184 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
185 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
186 *
187 * SCL_FREE
188 *	Protects changes to metaslab groups and classes.
189 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
190 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
191 *	blocks in zio_done() while another i/o that holds either
192 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
193 *
194 * SCL_VDEV
195 *	Held as reader to prevent changes to the vdev tree during trivial
196 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
197 *	other locks, and lower than all of them, to ensure that it's safe
198 *	to acquire regardless of caller context.
199 *
200 * In addition, the following rules apply:
201 *
202 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
203 *	The lock ordering is SCL_CONFIG > spa_props_lock.
204 *
205 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
206 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
207 *	or zio_write_phys() -- the caller must ensure that the config cannot
208 *	change in the interim, and that the vdev cannot be reopened.
209 *	SCL_STATE as reader suffices for both.
210 *
211 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
212 *
213 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
214 *				for writing.
215 *
216 *	spa_vdev_exit()		Release the config lock, wait for all I/O
217 *				to complete, sync the updated configs to the
218 *				cache, and release the namespace lock.
219 *
220 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
221 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
222 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
223 *
224 * spa_rename() is also implemented within this file since it requires
225 * manipulation of the namespace.
226 */
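
/*
 * An illustrative sketch of the intended usage of the locks described above;
 * this is not part of the surrounding implementation, and the identifiers
 * 'spa', 'newvd', 'txg' and 'error' are placeholders.
 *
 * Reading the vdev tree for a trivial inquiry (SCL_VDEV as reader):
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	... walk spa->spa_root_vdev ...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * Adding or removing a vdev (spa_vdev_enter()/spa_vdev_exit()):
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	... modify the vdev tree, recording any failure in 'error' ...
 *	return (spa_vdev_exit(spa, newvd, txg, error));
 */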
227
228static avl_tree_t spa_namespace_avl;
229kmutex_t spa_namespace_lock;
230static kcondvar_t spa_namespace_cv;
231static int spa_active_count;
232int spa_max_replication_override = SPA_DVAS_PER_BP;
233
234static kmutex_t spa_spare_lock;
235static avl_tree_t spa_spare_avl;
236static kmutex_t spa_l2cache_lock;
237static avl_tree_t spa_l2cache_avl;
238
239kmem_cache_t *spa_buffer_pool;
240int spa_mode_global;
241
242#ifdef ZFS_DEBUG
243/* Everything except dprintf and spa is on by default in debug builds */
244int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
245#else
246int zfs_flags = 0;
247#endif
248SYSCTL_DECL(_debug);
249TUNABLE_INT("debug.zfs_flags", &zfs_flags);
250SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
251    "ZFS debug flags.");
252
253/*
254 * zfs_recover can be set to nonzero to attempt to recover from
255 * otherwise-fatal errors, typically caused by on-disk corruption.  When
256 * set, calls to zfs_panic_recover() will turn into warning messages.
257 * This should only be used as a last resort, as it typically results
258 * in leaked space, or worse.
259 */
260boolean_t zfs_recover = B_FALSE;
261SYSCTL_DECL(_vfs_zfs);
262TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
263SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RWTUN, &zfs_recover, 0,
264    "Try to recover from otherwise-fatal errors.");
265
266/*
267 * If destroy encounters an EIO while reading metadata (e.g. indirect
268 * blocks), space referenced by the missing metadata can not be freed.
269 * Normally this causes the background destroy to become "stalled", as
270 * it is unable to make forward progress.  While in this stalled state,
271 * all remaining space to free from the error-encountering filesystem is
272 * "temporarily leaked".  Set this flag to cause it to ignore the EIO,
273 * permanently leak the space from indirect blocks that can not be read,
274 * and continue to free everything else that it can.
275 *
276 * The default, "stalling" behavior is useful if the storage partially
277 * fails (i.e. some but not all i/os fail), and then later recovers.  In
278 * this case, we will be able to continue pool operations while it is
279 * partially failed, and when it recovers, we can continue to free the
280 * space, with no leaks.  However, note that this case is actually
281 * fairly rare.
282 *
283 * Typically pools either (a) fail completely (but perhaps temporarily,
284 * e.g. a top-level vdev going offline), or (b) have localized,
285 * permanent errors (e.g. disk returns the wrong data due to bit flip or
286 * firmware bug).  In case (a), this setting does not matter because the
287 * pool will be suspended and the sync thread will not be able to make
288 * forward progress regardless.  In case (b), because the error is
289 * permanent, the best we can do is leak the minimum amount of space,
290 * which is what setting this flag will do.  Therefore, it is reasonable
291 * for this flag to normally be set, but we chose the more conservative
292 * approach of not setting it, so that there is no possibility of
293 * leaking space in the "partial temporary" failure case.
294 */
295boolean_t zfs_free_leak_on_eio = B_FALSE;
296
297/*
298 * Expiration time in milliseconds. This value has two meanings. First, it is
299 * used to determine when the spa_deadman() logic should fire. By default the
300 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
301 * Second, the value determines if an I/O is considered "hung". Any I/O that
302 * has not completed in zfs_deadman_synctime_ms is considered "hung", resulting
303 * in a system panic.
304 */
305uint64_t zfs_deadman_synctime_ms = 1000000ULL;
306TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
307SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
308    &zfs_deadman_synctime_ms, 0,
309    "Stalled ZFS I/O expiration time in milliseconds");
310
311/*
312 * Check time in milliseconds. This defines the frequency at which we check
313 * for hung I/O.
314 */
315uint64_t zfs_deadman_checktime_ms = 5000ULL;
316TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
317SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
318    &zfs_deadman_checktime_ms, 0,
319    "Period of checks for stalled ZFS I/O in milliseconds");
320
321/*
322 * Default value of -1 for zfs_deadman_enabled is resolved in
323 * zfs_deadman_init()
324 */
325int zfs_deadman_enabled = -1;
326TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
327SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
328    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");
329
330/*
331 * The worst case is single-sector max-parity RAID-Z blocks, in which
332 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
333 * times the size; so just assume that.  Add to this the fact that
334 * we can have up to 3 DVAs per bp, and one more factor of 2 because
335 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
336 * the worst case is:
337 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
338 */
339int spa_asize_inflation = 24;
340TUNABLE_INT("vfs.zfs.spa_asize_inflation", &spa_asize_inflation);
341SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_asize_inflation, CTLFLAG_RWTUN,
342    &spa_asize_inflation, 0, "Worst case inflation factor for single sector writes");
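
/*
 * A worked example of the bound above, assuming the default value of 24:
 * spa_get_asize() (defined later in this file) simply multiplies the logical
 * size by spa_asize_inflation, so a 4096-byte logical write reserves at most
 * 4096 * 24 = 98304 bytes of allocated space.
 */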
343
344#ifndef illumos
345#ifdef _KERNEL
346static void
347zfs_deadman_init()
348{
349	/*
350	 * If we are not running on i386 or amd64, or we are running in a
351	 * virtual machine, disable the ZFS deadman thread by default.
352	 */
353	if (zfs_deadman_enabled == -1) {
354#if defined(__amd64__) || defined(__i386__)
355		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
356#else
357		zfs_deadman_enabled = 0;
358#endif
359	}
360}
361#endif	/* _KERNEL */
362#endif	/* !illumos */
363
364/*
365 * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
366 * the pool to be consumed.  This ensures that we don't run the pool
367 * completely out of space, due to unaccounted changes (e.g. to the MOS).
368 * It also limits the worst-case time to allocate space.  If we have
369 * less than this amount of free space, most ZPL operations (e.g. write,
370 * create) will return ENOSPC.
371 *
372 * Certain operations (e.g. file removal, most administrative actions) can
373 * use half the slop space.  They will only return ENOSPC if less than half
374 * the slop space is free.  Typically, once the pool has less than the slop
375 * space free, the user will use these operations to free up space in the pool.
376 * These are the operations that call dsl_pool_adjustedsize() with the netfree
377 * argument set to TRUE.
378 *
379 * A very restricted set of operations are always permitted, regardless of
380 * the amount of free space.  These are the operations that call
381 * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy".  If these
382 * operations result in a net increase in the amount of space used,
383 * it is possible to run the pool completely out of space, causing it to
384 * be permanently read-only.
385 *
386 * See also the comments in zfs_space_check_t.
387 */
388int spa_slop_shift = 5;
389SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_slop_shift, CTLFLAG_RWTUN,
390    &spa_slop_shift, 0,
391    "Shift value of reserved space (1/(2^spa_slop_shift)).");
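
/*
 * A worked example of the reservation above, assuming the default
 * spa_slop_shift of 5: a pool with 1 TB of dspace keeps
 * 1 TB >> 5 = 32 GB of slop space, while a very small pool never reserves
 * less than SPA_MINDEVSIZE >> 1 = 32 MB; see spa_get_slop_space() below.
 */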
392
393/*
394 * ==========================================================================
395 * SPA config locking
396 * ==========================================================================
397 */
398static void
399spa_config_lock_init(spa_t *spa)
400{
401	for (int i = 0; i < SCL_LOCKS; i++) {
402		spa_config_lock_t *scl = &spa->spa_config_lock[i];
403		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
404		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
405		refcount_create_untracked(&scl->scl_count);
406		scl->scl_writer = NULL;
407		scl->scl_write_wanted = 0;
408	}
409}
410
411static void
412spa_config_lock_destroy(spa_t *spa)
413{
414	for (int i = 0; i < SCL_LOCKS; i++) {
415		spa_config_lock_t *scl = &spa->spa_config_lock[i];
416		mutex_destroy(&scl->scl_lock);
417		cv_destroy(&scl->scl_cv);
418		refcount_destroy(&scl->scl_count);
419		ASSERT(scl->scl_writer == NULL);
420		ASSERT(scl->scl_write_wanted == 0);
421	}
422}
423
424int
425spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
426{
427	for (int i = 0; i < SCL_LOCKS; i++) {
428		spa_config_lock_t *scl = &spa->spa_config_lock[i];
429		if (!(locks & (1 << i)))
430			continue;
431		mutex_enter(&scl->scl_lock);
432		if (rw == RW_READER) {
433			if (scl->scl_writer || scl->scl_write_wanted) {
434				mutex_exit(&scl->scl_lock);
435				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
436				return (0);
437			}
438		} else {
439			ASSERT(scl->scl_writer != curthread);
440			if (!refcount_is_zero(&scl->scl_count)) {
441				mutex_exit(&scl->scl_lock);
442				spa_config_exit(spa, locks & ((1 << i) - 1),
				    tag);
443				return (0);
444			}
445			scl->scl_writer = curthread;
446		}
447		(void) refcount_add(&scl->scl_count, tag);
448		mutex_exit(&scl->scl_lock);
449	}
450	return (1);
451}
452
453void
454spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
455{
456	int wlocks_held = 0;
457
458	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
459
460	for (int i = 0; i < SCL_LOCKS; i++) {
461		spa_config_lock_t *scl = &spa->spa_config_lock[i];
462		if (scl->scl_writer == curthread)
463			wlocks_held |= (1 << i);
464		if (!(locks & (1 << i)))
465			continue;
466		mutex_enter(&scl->scl_lock);
467		if (rw == RW_READER) {
468			while (scl->scl_writer || scl->scl_write_wanted) {
469				cv_wait(&scl->scl_cv, &scl->scl_lock);
470			}
471		} else {
472			ASSERT(scl->scl_writer != curthread);
473			while (!refcount_is_zero(&scl->scl_count)) {
474				scl->scl_write_wanted++;
475				cv_wait(&scl->scl_cv, &scl->scl_lock);
476				scl->scl_write_wanted--;
477			}
478			scl->scl_writer = curthread;
479		}
480		(void) refcount_add(&scl->scl_count, tag);
481		mutex_exit(&scl->scl_lock);
482	}
483	ASSERT(wlocks_held <= locks);
484}
485
486void
487spa_config_exit(spa_t *spa, int locks, void *tag)
488{
489	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
490		spa_config_lock_t *scl = &spa->spa_config_lock[i];
491		if (!(locks & (1 << i)))
492			continue;
493		mutex_enter(&scl->scl_lock);
494		ASSERT(!refcount_is_zero(&scl->scl_count));
495		if (refcount_remove(&scl->scl_count, tag) == 0) {
496			ASSERT(scl->scl_writer == NULL ||
497			    scl->scl_writer == curthread);
498			scl->scl_writer = NULL;	/* OK in either case */
499			cv_broadcast(&scl->scl_cv);
500		}
501		mutex_exit(&scl->scl_lock);
502	}
503}
504
505int
506spa_config_held(spa_t *spa, int locks, krw_t rw)
507{
508	int locks_held = 0;
509
510	for (int i = 0; i < SCL_LOCKS; i++) {
511		spa_config_lock_t *scl = &spa->spa_config_lock[i];
512		if (!(locks & (1 << i)))
513			continue;
514		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
515		    (rw == RW_WRITER && scl->scl_writer == curthread))
516			locks_held |= 1 << i;
517	}
518
519	return (locks_held);
520}
521
522/*
523 * ==========================================================================
524 * SPA namespace functions
525 * ==========================================================================
526 */
527
528/*
529 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
530 * Returns NULL if no matching spa_t is found.
531 */
532spa_t *
533spa_lookup(const char *name)
534{
535	static spa_t search;	/* spa_t is large; don't allocate on stack */
536	spa_t *spa;
537	avl_index_t where;
538	char *cp;
539
540	ASSERT(MUTEX_HELD(&spa_namespace_lock));
541
542	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
543
544	/*
545	 * If it's a full dataset name, figure out the pool name and
546	 * just use that.
547	 */
548	cp = strpbrk(search.spa_name, "/@#");
549	if (cp != NULL)
550		*cp = '\0';
551
552	spa = avl_find(&spa_namespace_avl, &search, &where);
553
554	return (spa);
555}
556
557/*
558 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
559 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
560 * looking for potentially hung I/Os.
561 */
562void
563spa_deadman(void *arg)
564{
565	spa_t *spa = arg;
566
567	/*
568	 * Disable the deadman timer if the pool is suspended.
569	 */
570	if (spa_suspended(spa)) {
571#ifdef illumos
572		VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
573#else
574		/* Nothing; just don't schedule any future callouts. */
575#endif
576		return;
577	}
578
579	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
580	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
581	    ++spa->spa_deadman_calls);
582	if (zfs_deadman_enabled)
583		vdev_deadman(spa->spa_root_vdev);
584}
585
586/*
587 * Create an uninitialized spa_t with the given name.  Requires
588 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
589 * exist by calling spa_lookup() first.
590 */
591spa_t *
592spa_add(const char *name, nvlist_t *config, const char *altroot)
593{
594	spa_t *spa;
595	spa_config_dirent_t *dp;
596#ifdef illumos
597	cyc_handler_t hdlr;
598	cyc_time_t when;
599#endif
600
601	ASSERT(MUTEX_HELD(&spa_namespace_lock));
602
603	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
604
605	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
606	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
607	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
608	mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
609	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
610	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
611	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
612	mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
613	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
614	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
615	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
616
617	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
618	cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
619	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
620	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
621	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
622
623	for (int t = 0; t < TXG_SIZE; t++)
624		bplist_create(&spa->spa_free_bplist[t]);
625
626	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
627	spa->spa_state = POOL_STATE_UNINITIALIZED;
628	spa->spa_freeze_txg = UINT64_MAX;
629	spa->spa_final_txg = UINT64_MAX;
630	spa->spa_load_max_txg = UINT64_MAX;
631	spa->spa_proc = &p0;
632	spa->spa_proc_state = SPA_PROC_NONE;
633
634#ifdef illumos
635	hdlr.cyh_func = spa_deadman;
636	hdlr.cyh_arg = spa;
637	hdlr.cyh_level = CY_LOW_LEVEL;
638#endif
639
640	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
641
642#ifdef illumos
643	/*
644	 * This determines how often we need to check for hung I/Os after
645	 * the cyclic has already fired. Since checking for hung I/Os is
646	 * an expensive operation we don't want to check too frequently.
647	 * Instead wait for 5 seconds before checking again.
648	 */
649	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
650	when.cyt_when = CY_INFINITY;
651	mutex_enter(&cpu_lock);
652	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
653	mutex_exit(&cpu_lock);
654#else	/* !illumos */
655#ifdef _KERNEL
656	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
657#endif
658#endif
659	refcount_create(&spa->spa_refcount);
660	spa_config_lock_init(spa);
661
662	avl_add(&spa_namespace_avl, spa);
663
664	/*
665	 * Set the alternate root, if there is one.
666	 */
667	if (altroot) {
668		spa->spa_root = spa_strdup(altroot);
669		spa_active_count++;
670	}
671
672	/*
673	 * Every pool starts with the default cachefile
674	 */
675	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
676	    offsetof(spa_config_dirent_t, scd_link));
677
678	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
679	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
680	list_insert_head(&spa->spa_config_list, dp);
681
682	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
683	    KM_SLEEP) == 0);
684
685	if (config != NULL) {
686		nvlist_t *features;
687
688		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
689		    &features) == 0) {
690			VERIFY(nvlist_dup(features, &spa->spa_label_features,
691			    0) == 0);
692		}
693
694		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
695	}
696
697	if (spa->spa_label_features == NULL) {
698		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
699		    KM_SLEEP) == 0);
700	}
701
702	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
703
704	spa->spa_min_ashift = INT_MAX;
705	spa->spa_max_ashift = 0;
706
707	/*
708	 * As a pool is being created, treat all features as disabled by
709	 * setting SPA_FEATURE_DISABLED for all entries in the feature
710	 * refcount cache.
711	 */
712	for (int i = 0; i < SPA_FEATURES; i++) {
713		spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
714	}
715
716	return (spa);
717}
718
719/*
720 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
721 * spa_namespace_lock.  This is called only after the spa_t has been closed and
722 * deactivated.
723 */
724void
725spa_remove(spa_t *spa)
726{
727	spa_config_dirent_t *dp;
728
729	ASSERT(MUTEX_HELD(&spa_namespace_lock));
730	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
731	ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
732
733	nvlist_free(spa->spa_config_splitting);
734
735	avl_remove(&spa_namespace_avl, spa);
736	cv_broadcast(&spa_namespace_cv);
737
738	if (spa->spa_root) {
739		spa_strfree(spa->spa_root);
740		spa_active_count--;
741	}
742
743	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
744		list_remove(&spa->spa_config_list, dp);
745		if (dp->scd_path != NULL)
746			spa_strfree(dp->scd_path);
747		kmem_free(dp, sizeof (spa_config_dirent_t));
748	}
749
750	list_destroy(&spa->spa_config_list);
751
752	nvlist_free(spa->spa_label_features);
753	nvlist_free(spa->spa_load_info);
754	spa_config_set(spa, NULL);
755
756#ifdef illumos
757	mutex_enter(&cpu_lock);
758	if (spa->spa_deadman_cycid != CYCLIC_NONE)
759		cyclic_remove(spa->spa_deadman_cycid);
760	mutex_exit(&cpu_lock);
761	spa->spa_deadman_cycid = CYCLIC_NONE;
762#else	/* !illumos */
763#ifdef _KERNEL
764	callout_drain(&spa->spa_deadman_cycid);
765#endif
766#endif
767
768	refcount_destroy(&spa->spa_refcount);
769
770	spa_config_lock_destroy(spa);
771
772	for (int t = 0; t < TXG_SIZE; t++)
773		bplist_destroy(&spa->spa_free_bplist[t]);
774
775	zio_checksum_templates_free(spa);
776
777	cv_destroy(&spa->spa_async_cv);
778	cv_destroy(&spa->spa_evicting_os_cv);
779	cv_destroy(&spa->spa_proc_cv);
780	cv_destroy(&spa->spa_scrub_io_cv);
781	cv_destroy(&spa->spa_suspend_cv);
782
783	mutex_destroy(&spa->spa_async_lock);
784	mutex_destroy(&spa->spa_errlist_lock);
785	mutex_destroy(&spa->spa_errlog_lock);
786	mutex_destroy(&spa->spa_evicting_os_lock);
787	mutex_destroy(&spa->spa_history_lock);
788	mutex_destroy(&spa->spa_proc_lock);
789	mutex_destroy(&spa->spa_props_lock);
790	mutex_destroy(&spa->spa_cksum_tmpls_lock);
791	mutex_destroy(&spa->spa_scrub_lock);
792	mutex_destroy(&spa->spa_suspend_lock);
793	mutex_destroy(&spa->spa_vdev_top_lock);
794
795	kmem_free(spa, sizeof (spa_t));
796}
797
798/*
799 * Given a pool, return the next pool in the namespace, or NULL if there is
800 * none.  If 'prev' is NULL, return the first pool.
801 */
802spa_t *
803spa_next(spa_t *prev)
804{
805	ASSERT(MUTEX_HELD(&spa_namespace_lock));
806
807	if (prev)
808		return (AVL_NEXT(&spa_namespace_avl, prev));
809	else
810		return (avl_first(&spa_namespace_avl));
811}
812
813/*
814 * ==========================================================================
815 * SPA refcount functions
816 * ==========================================================================
817 */
818
819/*
820 * Add a reference to the given spa_t.  Must have at least one reference, or
821 * have the namespace lock held.
822 */
823void
824spa_open_ref(spa_t *spa, void *tag)
825{
826	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
827	    MUTEX_HELD(&spa_namespace_lock));
828	(void) refcount_add(&spa->spa_refcount, tag);
829}
830
831/*
832 * Remove a reference to the given spa_t.  Must have at least one reference, or
833 * have the namespace lock held.
834 */
835void
836spa_close(spa_t *spa, void *tag)
837{
838	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
839	    MUTEX_HELD(&spa_namespace_lock));
840	(void) refcount_remove(&spa->spa_refcount, tag);
841}
842
843/*
844 * Remove a reference to the given spa_t held by a dsl dir that is
845 * being asynchronously released.  Async releases occur from a taskq
846 * performing eviction of dsl datasets and dirs.  The namespace lock
847 * isn't held and the hold by the object being evicted may contribute to
848 * spa_minref (e.g. dataset or directory released during pool export),
849 * so the asserts in spa_close() do not apply.
850 */
851void
852spa_async_close(spa_t *spa, void *tag)
853{
854	(void) refcount_remove(&spa->spa_refcount, tag);
855}
856
857/*
858 * Check to see if the spa refcount is zero.  Must be called with
859 * spa_namespace_lock held.  We really compare against spa_minref, which is the
860 * number of references acquired when opening a pool
861 * number of references acquired when opening a pool.
862boolean_t
863spa_refcount_zero(spa_t *spa)
864{
865	ASSERT(MUTEX_HELD(&spa_namespace_lock));
866
867	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
868}
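
/*
 * An illustrative sketch of the reference counting rules above; "tank" is a
 * placeholder pool name and this snippet is not part of the implementation:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	spa = spa_lookup("tank");
 *	if (spa != NULL)
 *		spa_open_ref(spa, FTAG);
 *	mutex_exit(&spa_namespace_lock);
 *	...
 *	spa_close(spa, FTAG);	(no lock needed; the spa_t is not freed)
 */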
869
870/*
871 * ==========================================================================
872 * SPA spare and l2cache tracking
873 * ==========================================================================
874 */
875
876/*
877 * Hot spares and cache devices are tracked using the same code below,
878 * for 'auxiliary' devices.
879 */
880
881typedef struct spa_aux {
882	uint64_t	aux_guid;
883	uint64_t	aux_pool;
884	avl_node_t	aux_avl;
885	int		aux_count;
886} spa_aux_t;
887
888static int
889spa_aux_compare(const void *a, const void *b)
890{
891	const spa_aux_t *sa = a;
892	const spa_aux_t *sb = b;
893
894	if (sa->aux_guid < sb->aux_guid)
895		return (-1);
896	else if (sa->aux_guid > sb->aux_guid)
897		return (1);
898	else
899		return (0);
900}
901
902void
903spa_aux_add(vdev_t *vd, avl_tree_t *avl)
904{
905	avl_index_t where;
906	spa_aux_t search;
907	spa_aux_t *aux;
908
909	search.aux_guid = vd->vdev_guid;
910	if ((aux = avl_find(avl, &search, &where)) != NULL) {
911		aux->aux_count++;
912	} else {
913		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
914		aux->aux_guid = vd->vdev_guid;
915		aux->aux_count = 1;
916		avl_insert(avl, aux, where);
917	}
918}
919
920void
921spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
922{
923	spa_aux_t search;
924	spa_aux_t *aux;
925	avl_index_t where;
926
927	search.aux_guid = vd->vdev_guid;
928	aux = avl_find(avl, &search, &where);
929
930	ASSERT(aux != NULL);
931
932	if (--aux->aux_count == 0) {
933		avl_remove(avl, aux);
934		kmem_free(aux, sizeof (spa_aux_t));
935	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
936		aux->aux_pool = 0ULL;
937	}
938}
939
940boolean_t
941spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
942{
943	spa_aux_t search, *found;
944
945	search.aux_guid = guid;
946	found = avl_find(avl, &search, NULL);
947
948	if (pool) {
949		if (found)
950			*pool = found->aux_pool;
951		else
952			*pool = 0ULL;
953	}
954
955	if (refcnt) {
956		if (found)
957			*refcnt = found->aux_count;
958		else
959			*refcnt = 0;
960	}
961
962	return (found != NULL);
963}
964
965void
966spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
967{
968	spa_aux_t search, *found;
969	avl_index_t where;
970
971	search.aux_guid = vd->vdev_guid;
972	found = avl_find(avl, &search, &where);
973	ASSERT(found != NULL);
974	ASSERT(found->aux_pool == 0ULL);
975
976	found->aux_pool = spa_guid(vd->vdev_spa);
977}
978
979/*
980 * Spares are tracked globally due to the following constraints:
981 *
982 * 	- A spare may be part of multiple pools.
983 * 	- A spare may be added to a pool even if it's actively in use within
984 *	  another pool.
985 * 	- A spare in use in any pool can only be the source of a replacement if
986 *	  the target is a spare in the same pool.
987 *
988 * We keep track of all spares on the system through the use of a reference
989 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
990 * spare, then we bump the reference count in the AVL tree.  In addition, we set
991 * the 'vdev_isspare' member to indicate that the device is a spare (active or
992 * inactive).  When a spare is made active (used to replace a device in the
993 * pool), we also keep track of which pool it has been made a part of.
994 *
995 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
996 * called under the spa_namespace lock as part of vdev reconfiguration.  The
997 * separate spare lock exists for the status query path, which does not need to
998 * be completely consistent with respect to other vdev configuration changes.
999 */
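
/*
 * An illustrative sketch of querying the spare tracking described above;
 * 'guid' is a placeholder device guid:
 *
 *	uint64_t pool;
 *	int refcnt;
 *	if (spa_spare_exists(guid, &pool, &refcnt) && pool != 0ULL)
 *		... the spare is currently activated by the pool 'pool' ...
 */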
1000
1001static int
1002spa_spare_compare(const void *a, const void *b)
1003{
1004	return (spa_aux_compare(a, b));
1005}
1006
1007void
1008spa_spare_add(vdev_t *vd)
1009{
1010	mutex_enter(&spa_spare_lock);
1011	ASSERT(!vd->vdev_isspare);
1012	spa_aux_add(vd, &spa_spare_avl);
1013	vd->vdev_isspare = B_TRUE;
1014	mutex_exit(&spa_spare_lock);
1015}
1016
1017void
1018spa_spare_remove(vdev_t *vd)
1019{
1020	mutex_enter(&spa_spare_lock);
1021	ASSERT(vd->vdev_isspare);
1022	spa_aux_remove(vd, &spa_spare_avl);
1023	vd->vdev_isspare = B_FALSE;
1024	mutex_exit(&spa_spare_lock);
1025}
1026
1027boolean_t
1028spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
1029{
1030	boolean_t found;
1031
1032	mutex_enter(&spa_spare_lock);
1033	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
1034	mutex_exit(&spa_spare_lock);
1035
1036	return (found);
1037}
1038
1039void
1040spa_spare_activate(vdev_t *vd)
1041{
1042	mutex_enter(&spa_spare_lock);
1043	ASSERT(vd->vdev_isspare);
1044	spa_aux_activate(vd, &spa_spare_avl);
1045	mutex_exit(&spa_spare_lock);
1046}
1047
1048/*
1049 * Level 2 ARC devices are tracked globally for the same reasons as spares.
1050 * Cache devices currently only support one pool per cache device, and so
1051 * for these devices the aux reference count is currently unused beyond 1.
1052 */
1053
1054static int
1055spa_l2cache_compare(const void *a, const void *b)
1056{
1057	return (spa_aux_compare(a, b));
1058}
1059
1060void
1061spa_l2cache_add(vdev_t *vd)
1062{
1063	mutex_enter(&spa_l2cache_lock);
1064	ASSERT(!vd->vdev_isl2cache);
1065	spa_aux_add(vd, &spa_l2cache_avl);
1066	vd->vdev_isl2cache = B_TRUE;
1067	mutex_exit(&spa_l2cache_lock);
1068}
1069
1070void
1071spa_l2cache_remove(vdev_t *vd)
1072{
1073	mutex_enter(&spa_l2cache_lock);
1074	ASSERT(vd->vdev_isl2cache);
1075	spa_aux_remove(vd, &spa_l2cache_avl);
1076	vd->vdev_isl2cache = B_FALSE;
1077	mutex_exit(&spa_l2cache_lock);
1078}
1079
1080boolean_t
1081spa_l2cache_exists(uint64_t guid, uint64_t *pool)
1082{
1083	boolean_t found;
1084
1085	mutex_enter(&spa_l2cache_lock);
1086	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
1087	mutex_exit(&spa_l2cache_lock);
1088
1089	return (found);
1090}
1091
1092void
1093spa_l2cache_activate(vdev_t *vd)
1094{
1095	mutex_enter(&spa_l2cache_lock);
1096	ASSERT(vd->vdev_isl2cache);
1097	spa_aux_activate(vd, &spa_l2cache_avl);
1098	mutex_exit(&spa_l2cache_lock);
1099}
1100
1101/*
1102 * ==========================================================================
1103 * SPA vdev locking
1104 * ==========================================================================
1105 */
1106
1107/*
1108 * Lock the given spa_t for the purpose of adding or removing a vdev.
1109 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
1110 * It returns the next transaction group for the spa_t.
1111 */
1112uint64_t
1113spa_vdev_enter(spa_t *spa)
1114{
1115	mutex_enter(&spa->spa_vdev_top_lock);
1116	mutex_enter(&spa_namespace_lock);
1117	return (spa_vdev_config_enter(spa));
1118}
1119
1120/*
1121 * Internal implementation for spa_vdev_enter().  Used when a vdev
1122 * operation requires multiple syncs (i.e. removing a device) while
1123 * keeping the spa_namespace_lock held.
1124 */
1125uint64_t
1126spa_vdev_config_enter(spa_t *spa)
1127{
1128	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1129
1130	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1131
1132	return (spa_last_synced_txg(spa) + 1);
1133}
1134
1135/*
1136 * Used in combination with spa_vdev_config_enter() to allow the syncing
1137 * of multiple transactions without releasing the spa_namespace_lock.
1138 */
1139void
1140spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
1141{
1142	boolean_t config_changed = B_FALSE;
1143
1144	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1145	ASSERT(txg > spa_last_synced_txg(spa));
1147
1148	spa->spa_pending_vdev = NULL;
1149
1150	/*
1151	 * Reassess the DTLs.
1152	 */
1153	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
1154
1155	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
1156		config_changed = B_TRUE;
1157		spa->spa_config_generation++;
1158	}
1159
1160	/*
1161	 * Verify the metaslab classes.
1162	 */
1163	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
1164	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
1165
1166	spa_config_exit(spa, SCL_ALL, spa);
1167
1168	/*
1169	 * Panic the system if the specified tag requires it.  This
1170	 * is useful for ensuring that configurations are updated
1171	 * transactionally.
1172	 */
1173	if (zio_injection_enabled)
1174		zio_handle_panic_injection(spa, tag, 0);
1175
1176	/*
1177	 * Note: this txg_wait_synced() is important because it ensures
1178	 * that there won't be more than one config change per txg.
1179	 * This allows us to use the txg as the generation number.
1180	 */
1181	if (error == 0)
1182		txg_wait_synced(spa->spa_dsl_pool, txg);
1183
1184	if (vd != NULL) {
1185		ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
1186		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
1187		vdev_free(vd);
1188		spa_config_exit(spa, SCL_ALL, spa);
1189	}
1190
1191	/*
1192	 * If the config changed, update the config cache.
1193	 */
1194	if (config_changed)
1195		spa_config_sync(spa, B_FALSE, B_TRUE);
1196}
1197
1198/*
1199 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
1200 * locking of spa_vdev_enter(), we also want to make sure the transactions have
1201 * synced to disk, and then update the global configuration cache with the new
1202 * information.
1203 */
1204int
1205spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
1206{
1207	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
1208	mutex_exit(&spa_namespace_lock);
1209	mutex_exit(&spa->spa_vdev_top_lock);
1210
1211	return (error);
1212}
1213
1214/*
1215 * Lock the given spa_t for the purpose of changing vdev state.
1216 */
1217void
1218spa_vdev_state_enter(spa_t *spa, int oplocks)
1219{
1220	int locks = SCL_STATE_ALL | oplocks;
1221
1222	/*
1223	 * Root pools may need to read from the underlying devfs filesystem
1224	 * when opening up a vdev.  Unfortunately if we're holding the
1225	 * SCL_ZIO lock it will result in a deadlock when we try to issue
1226	 * the read from the root filesystem.  Instead we "prefetch"
1227	 * the associated vnodes that we need prior to opening the
1228	 * underlying devices and cache them so that we can prevent
1229	 * any I/O when we are doing the actual open.
1230	 */
1231	if (spa_is_root(spa)) {
1232		int low = locks & ~(SCL_ZIO - 1);
1233		int high = locks & ~low;
1234
1235		spa_config_enter(spa, high, spa, RW_WRITER);
1236		vdev_hold(spa->spa_root_vdev);
1237		spa_config_enter(spa, low, spa, RW_WRITER);
1238	} else {
1239		spa_config_enter(spa, locks, spa, RW_WRITER);
1240	}
1241	spa->spa_vdev_locks = locks;
1242}
1243
1244int
1245spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
1246{
1247	boolean_t config_changed = B_FALSE;
1248
1249	if (vd != NULL || error == 0)
1250		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
1251		    0, 0, B_FALSE);
1252
1253	if (vd != NULL) {
1254		vdev_state_dirty(vd->vdev_top);
1255		config_changed = B_TRUE;
1256		spa->spa_config_generation++;
1257	}
1258
1259	if (spa_is_root(spa))
1260		vdev_rele(spa->spa_root_vdev);
1261
1262	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
1263	spa_config_exit(spa, spa->spa_vdev_locks, spa);
1264
1265	/*
1266	 * If anything changed, wait for it to sync.  This ensures that,
1267	 * from the system administrator's perspective, zpool(1M) commands
1268	 * are synchronous.  This is important for things like zpool offline:
1269	 * when the command completes, you expect no further I/O from ZFS.
1270	 */
1271	if (vd != NULL)
1272		txg_wait_synced(spa->spa_dsl_pool, 0);
1273
1274	/*
1275	 * If the config changed, update the config cache.
1276	 */
1277	if (config_changed) {
1278		mutex_enter(&spa_namespace_lock);
1279		spa_config_sync(spa, B_FALSE, B_TRUE);
1280		mutex_exit(&spa_namespace_lock);
1281	}
1282
1283	return (error);
1284}
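
/*
 * An illustrative sketch of a vdev state change using the wrappers above;
 * 'vd' is a placeholder leaf vdev and this snippet is not part of the
 * implementation:
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	... change vd's state, e.g. fault, degrade or clear it ...
 *	return (spa_vdev_state_exit(spa, vd, 0));
 */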
1285
1286/*
1287 * ==========================================================================
1288 * Miscellaneous functions
1289 * ==========================================================================
1290 */
1291
1292void
1293spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
1294{
1295	if (!nvlist_exists(spa->spa_label_features, feature)) {
1296		fnvlist_add_boolean(spa->spa_label_features, feature);
1297		/*
1298		 * When we are creating the pool (tx_txg==TXG_INITIAL), we can't
1299		 * dirty the vdev config because lock SCL_CONFIG is not held.
1300		 * Thankfully, in this case we don't need to dirty the config
1301		 * because it will be written out anyway when we finish
1302		 * creating the pool.
1303		 */
1304		if (tx->tx_txg != TXG_INITIAL)
1305			vdev_config_dirty(spa->spa_root_vdev);
1306	}
1307}
1308
1309void
1310spa_deactivate_mos_feature(spa_t *spa, const char *feature)
1311{
1312	if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
1313		vdev_config_dirty(spa->spa_root_vdev);
1314}
1315
1316/*
1317 * Rename a spa_t.
1318 */
1319int
1320spa_rename(const char *name, const char *newname)
1321{
1322	spa_t *spa;
1323	int err;
1324
1325	/*
1326	 * Lookup the spa_t and grab the config lock for writing.  We need to
1327	 * actually open the pool so that we can sync out the necessary labels.
1328	 * It's OK to call spa_open() with the namespace lock held because we
1329	 * allow recursive calls for other reasons.
1330	 */
1331	mutex_enter(&spa_namespace_lock);
1332	if ((err = spa_open(name, &spa, FTAG)) != 0) {
1333		mutex_exit(&spa_namespace_lock);
1334		return (err);
1335	}
1336
1337	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1338
1339	avl_remove(&spa_namespace_avl, spa);
1340	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
1341	avl_add(&spa_namespace_avl, spa);
1342
1343	/*
1344	 * Sync all labels to disk with the new names by marking the root vdev
1345	 * dirty and waiting for it to sync.  It will pick up the new pool name
1346	 * during the sync.
1347	 */
1348	vdev_config_dirty(spa->spa_root_vdev);
1349
1350	spa_config_exit(spa, SCL_ALL, FTAG);
1351
1352	txg_wait_synced(spa->spa_dsl_pool, 0);
1353
1354	/*
1355	 * Sync the updated config cache.
1356	 */
1357	spa_config_sync(spa, B_FALSE, B_TRUE);
1358
1359	spa_close(spa, FTAG);
1360
1361	mutex_exit(&spa_namespace_lock);
1362
1363	return (0);
1364}
1365
1366/*
1367 * Return the spa_t associated with given pool_guid, if it exists.  If
1368 * device_guid is non-zero, determine whether the pool exists *and* contains
1369 * a device with the specified device_guid.
1370 */
1371spa_t *
1372spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
1373{
1374	spa_t *spa;
1375	avl_tree_t *t = &spa_namespace_avl;
1376
1377	ASSERT(MUTEX_HELD(&spa_namespace_lock));
1378
1379	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
1380		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
1381			continue;
1382		if (spa->spa_root_vdev == NULL)
1383			continue;
1384		if (spa_guid(spa) == pool_guid) {
1385			if (device_guid == 0)
1386				break;
1387
1388			if (vdev_lookup_by_guid(spa->spa_root_vdev,
1389			    device_guid) != NULL)
1390				break;
1391
1392			/*
1393			 * Check any devices we may be in the process of adding.
1394			 */
1395			if (spa->spa_pending_vdev) {
1396				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
1397				    device_guid) != NULL)
1398					break;
1399			}
1400		}
1401	}
1402
1403	return (spa);
1404}
1405
1406/*
1407 * Determine whether a pool with the given pool_guid exists.
1408 */
1409boolean_t
1410spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
1411{
1412	return (spa_by_guid(pool_guid, device_guid) != NULL);
1413}
1414
1415char *
1416spa_strdup(const char *s)
1417{
1418	size_t len;
1419	char *new;
1420
1421	len = strlen(s);
1422	new = kmem_alloc(len + 1, KM_SLEEP);
1423	bcopy(s, new, len);
1424	new[len] = '\0';
1425
1426	return (new);
1427}
1428
1429void
1430spa_strfree(char *s)
1431{
1432	kmem_free(s, strlen(s) + 1);
1433}
1434
1435uint64_t
1436spa_get_random(uint64_t range)
1437{
1438	uint64_t r;
1439
1440	ASSERT(range != 0);
1441
1442	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
1443
1444	return (r % range);
1445}
1446
1447uint64_t
1448spa_generate_guid(spa_t *spa)
1449{
1450	uint64_t guid = spa_get_random(-1ULL);
1451
1452	if (spa != NULL) {
1453		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
1454			guid = spa_get_random(-1ULL);
1455	} else {
1456		while (guid == 0 || spa_guid_exists(guid, 0))
1457			guid = spa_get_random(-1ULL);
1458	}
1459
1460	return (guid);
1461}
1462
1463void
1464snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
1465{
1466	char type[256];
1467	char *checksum = NULL;
1468	char *compress = NULL;
1469
1470	if (bp != NULL) {
1471		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
1472			dmu_object_byteswap_t bswap =
1473			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
1474			(void) snprintf(type, sizeof (type), "bswap %s %s",
1475			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
1476			    "metadata" : "data",
1477			    dmu_ot_byteswap[bswap].ob_name);
1478		} else {
1479			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
1480			    sizeof (type));
1481		}
1482		if (!BP_IS_EMBEDDED(bp)) {
1483			checksum =
1484			    zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
1485		}
1486		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
1487	}
1488
1489	SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
1490	    compress);
1491}
1492
1493void
1494spa_freeze(spa_t *spa)
1495{
1496	uint64_t freeze_txg = 0;
1497
1498	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
1499	if (spa->spa_freeze_txg == UINT64_MAX) {
1500		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
1501		spa->spa_freeze_txg = freeze_txg;
1502	}
1503	spa_config_exit(spa, SCL_ALL, FTAG);
1504	if (freeze_txg != 0)
1505		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
1506}
1507
1508void
1509zfs_panic_recover(const char *fmt, ...)
1510{
1511	va_list adx;
1512
1513	va_start(adx, fmt);
1514	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
1515	va_end(adx);
1516}
1517
1518/*
1519 * This is a stripped-down version of strtoull, suitable only for converting
1520 * lowercase hexadecimal numbers that don't overflow.
1521 */
1522uint64_t
1523zfs_strtonum(const char *str, char **nptr)
1524{
1525	uint64_t val = 0;
1526	char c;
1527	int digit;
1528
1529	while ((c = *str) != '\0') {
1530		if (c >= '0' && c <= '9')
1531			digit = c - '0';
1532		else if (c >= 'a' && c <= 'f')
1533			digit = 10 + c - 'a';
1534		else
1535			break;
1536
1537		val *= 16;
1538		val += digit;
1539
1540		str++;
1541	}
1542
1543	if (nptr)
1544		*nptr = (char *)str;
1545
1546	return (val);
1547}
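
/*
 * For example (illustrative only), zfs_strtonum("1a2b", &end) returns 0x1a2b
 * and leaves 'end' pointing at the terminating NUL; parsing stops at the
 * first character that is not a decimal digit or a lowercase hex letter.
 */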
1548
1549/*
1550 * ==========================================================================
1551 * Accessor functions
1552 * ==========================================================================
1553 */
1554
1555boolean_t
1556spa_shutting_down(spa_t *spa)
1557{
1558	return (spa->spa_async_suspended);
1559}
1560
1561dsl_pool_t *
1562spa_get_dsl(spa_t *spa)
1563{
1564	return (spa->spa_dsl_pool);
1565}
1566
1567boolean_t
1568spa_is_initializing(spa_t *spa)
1569{
1570	return (spa->spa_is_initializing);
1571}
1572
1573blkptr_t *
1574spa_get_rootblkptr(spa_t *spa)
1575{
1576	return (&spa->spa_ubsync.ub_rootbp);
1577}
1578
1579void
1580spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
1581{
1582	spa->spa_uberblock.ub_rootbp = *bp;
1583}
1584
1585void
1586spa_altroot(spa_t *spa, char *buf, size_t buflen)
1587{
1588	if (spa->spa_root == NULL)
1589		buf[0] = '\0';
1590	else
1591		(void) strncpy(buf, spa->spa_root, buflen);
1592}
1593
1594int
1595spa_sync_pass(spa_t *spa)
1596{
1597	return (spa->spa_sync_pass);
1598}
1599
1600char *
1601spa_name(spa_t *spa)
1602{
1603	return (spa->spa_name);
1604}
1605
1606uint64_t
1607spa_guid(spa_t *spa)
1608{
1609	dsl_pool_t *dp = spa_get_dsl(spa);
1610	uint64_t guid;
1611
1612	/*
1613	 * If we fail to parse the config during spa_load(), we can go through
1614	 * the error path (which posts an ereport) and end up here with no root
1615	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
1616	 * this case.
1617	 */
1618	if (spa->spa_root_vdev == NULL)
1619		return (spa->spa_config_guid);
1620
1621	guid = spa->spa_last_synced_guid != 0 ?
1622	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
1623
1624	/*
1625	 * Return the most recently synced out guid unless we're
1626	 * in syncing context.
1627	 */
1628	if (dp && dsl_pool_sync_context(dp))
1629		return (spa->spa_root_vdev->vdev_guid);
1630	else
1631		return (guid);
1632}
1633
1634uint64_t
1635spa_load_guid(spa_t *spa)
1636{
1637	/*
1638	 * This is a GUID that exists solely as a reference for the
1639	 * purposes of the arc.  It is generated at load time, and
1640	 * is never written to persistent storage.
1641	 */
1642	return (spa->spa_load_guid);
1643}
1644
1645uint64_t
1646spa_last_synced_txg(spa_t *spa)
1647{
1648	return (spa->spa_ubsync.ub_txg);
1649}
1650
1651uint64_t
1652spa_first_txg(spa_t *spa)
1653{
1654	return (spa->spa_first_txg);
1655}
1656
1657uint64_t
1658spa_syncing_txg(spa_t *spa)
1659{
1660	return (spa->spa_syncing_txg);
1661}
1662
1663pool_state_t
1664spa_state(spa_t *spa)
1665{
1666	return (spa->spa_state);
1667}
1668
1669spa_load_state_t
1670spa_load_state(spa_t *spa)
1671{
1672	return (spa->spa_load_state);
1673}
1674
1675uint64_t
1676spa_freeze_txg(spa_t *spa)
1677{
1678	return (spa->spa_freeze_txg);
1679}
1680
1681/* ARGSUSED */
1682uint64_t
1683spa_get_asize(spa_t *spa, uint64_t lsize)
1684{
1685	return (lsize * spa_asize_inflation);
1686}
1687
1688/*
1689 * Return the amount of slop space in bytes.  It is 1/32 of the pool (3.2%),
1690 * or at least 32MB.
1691 *
1692 * See the comment above spa_slop_shift for details.
1693 */
1694uint64_t
1695spa_get_slop_space(spa_t *spa)
{
1696	uint64_t space = spa_get_dspace(spa);
1697	return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1));
1698}
1699
1700uint64_t
1701spa_get_dspace(spa_t *spa)
1702{
1703	return (spa->spa_dspace);
1704}
1705
1706void
1707spa_update_dspace(spa_t *spa)
1708{
1709	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
1710	    ddt_get_dedup_dspace(spa);
1711}
1712
1713/*
1714 * Return the failure mode that has been set for this pool. The default
1715 * behavior will be to block all I/Os when a complete failure occurs.
1716 */
1717uint8_t
1718spa_get_failmode(spa_t *spa)
1719{
1720	return (spa->spa_failmode);
1721}
1722
1723boolean_t
1724spa_suspended(spa_t *spa)
1725{
1726	return (spa->spa_suspended);
1727}
1728
1729uint64_t
1730spa_version(spa_t *spa)
1731{
1732	return (spa->spa_ubsync.ub_version);
1733}
1734
1735boolean_t
1736spa_deflate(spa_t *spa)
1737{
1738	return (spa->spa_deflate);
1739}
1740
1741metaslab_class_t *
1742spa_normal_class(spa_t *spa)
1743{
1744	return (spa->spa_normal_class);
1745}
1746
1747metaslab_class_t *
1748spa_log_class(spa_t *spa)
1749{
1750	return (spa->spa_log_class);
1751}
1752
1753void
1754spa_evicting_os_register(spa_t *spa, objset_t *os)
1755{
1756	mutex_enter(&spa->spa_evicting_os_lock);
1757	list_insert_head(&spa->spa_evicting_os_list, os);
1758	mutex_exit(&spa->spa_evicting_os_lock);
1759}
1760
1761void
1762spa_evicting_os_deregister(spa_t *spa, objset_t *os)
1763{
1764	mutex_enter(&spa->spa_evicting_os_lock);
1765	list_remove(&spa->spa_evicting_os_list, os);
1766	cv_broadcast(&spa->spa_evicting_os_cv);
1767	mutex_exit(&spa->spa_evicting_os_lock);
1768}
1769
1770void
1771spa_evicting_os_wait(spa_t *spa)
1772{
1773	mutex_enter(&spa->spa_evicting_os_lock);
1774	while (!list_is_empty(&spa->spa_evicting_os_list))
1775		cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
1776	mutex_exit(&spa->spa_evicting_os_lock);
1777
1778	dmu_buf_user_evict_wait();
1779}
1780
1781int
1782spa_max_replication(spa_t *spa)
1783{
1784	/*
1785	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
1786	 * handle BPs with more than one DVA allocated.  Set our max
1787	 * replication level accordingly.
1788	 */
1789	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
1790		return (1);
1791	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
1792}
1793
1794int
1795spa_prev_software_version(spa_t *spa)
1796{
1797	return (spa->spa_prev_software_version);
1798}
1799
1800uint64_t
1801spa_deadman_synctime(spa_t *spa)
1802{
1803	return (spa->spa_deadman_synctime);
1804}
1805
1806uint64_t
1807dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
1808{
1809	uint64_t asize = DVA_GET_ASIZE(dva);
1810	uint64_t dsize = asize;
1811
1812	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
1813
1814	if (asize != 0 && spa->spa_deflate) {
1815		uint64_t vdev = DVA_GET_VDEV(dva);
1816		vdev_t *vd = vdev_lookup_top(spa, vdev);
1817		if (vd == NULL) {
1818			panic(
1819			    "dva_get_dsize_sync(): bad DVA %llu:%llu",
1820			    (u_longlong_t)vdev, (u_longlong_t)asize);
1821		}
1822		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
1823	}
1824
1825	return (dsize);
1826}
1827
1828uint64_t
1829bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
1830{
1831	uint64_t dsize = 0;
1832
1833	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1834		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1835
1836	return (dsize);
1837}
1838
1839uint64_t
1840bp_get_dsize(spa_t *spa, const blkptr_t *bp)
1841{
1842	uint64_t dsize = 0;
1843
1844	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
1845
1846	for (int d = 0; d < BP_GET_NDVAS(bp); d++)
1847		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
1848
1849	spa_config_exit(spa, SCL_VDEV, FTAG);
1850
1851	return (dsize);
1852}
1853
1854/*
1855 * ==========================================================================
1856 * Initialization and Termination
1857 * ==========================================================================
1858 */
1859
1860static int
1861spa_name_compare(const void *a1, const void *a2)
1862{
1863	const spa_t *s1 = a1;
1864	const spa_t *s2 = a2;
1865	int s;
1866
1867	s = strcmp(s1->spa_name, s2->spa_name);
1868	if (s > 0)
1869		return (1);
1870	if (s < 0)
1871		return (-1);
1872	return (0);
1873}
1874
1875int
1876spa_busy(void)
1877{
1878	return (spa_active_count);
1879}
1880
1881void
1882spa_boot_init()
1883{
1884	spa_config_load();
1885}
1886
1887#ifdef _KERNEL
1888EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
1889#endif
1890
1891void
1892spa_init(int mode)
1893{
1894	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
1895	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
1896	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
1897	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
1898
1899	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
1900	    offsetof(spa_t, spa_avl));
1901
1902	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
1903	    offsetof(spa_aux_t, aux_avl));
1904
1905	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
1906	    offsetof(spa_aux_t, aux_avl));
1907
1908	spa_mode_global = mode;
1909
1910#ifdef illumos
1911#ifdef _KERNEL
1912	spa_arch_init();
1913#else
1914	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
1915		arc_procfd = open("/proc/self/ctl", O_WRONLY);
1916		if (arc_procfd == -1) {
1917			perror("could not enable watchpoints: "
1918			    "opening /proc/self/ctl failed: ");
1919		} else {
1920			arc_watch = B_TRUE;
1921		}
1922	}
1923#endif
1924#endif /* illumos */
1925	refcount_sysinit();
1926	unique_init();
1927	range_tree_init();
1928	zio_init();
1929	lz4_init();
1930	dmu_init();
1931	zil_init();
1932	vdev_cache_stat_init();
1933	zfs_prop_init();
1934	zpool_prop_init();
1935	zpool_feature_init();
1936	spa_config_load();
1937	l2arc_start();
1938#ifndef illumos
1939#ifdef _KERNEL
1940	zfs_deadman_init();
1941#endif
1942#endif	/* !illumos */
1943}
1944
1945void
1946spa_fini(void)
1947{
1948	l2arc_stop();
1949
1950	spa_evict_all();
1951
1952	vdev_cache_stat_fini();
1953	zil_fini();
1954	dmu_fini();
1955	lz4_fini();
1956	zio_fini();
1957	range_tree_fini();
1958	unique_fini();
1959	refcount_fini();
1960
1961	avl_destroy(&spa_namespace_avl);
1962	avl_destroy(&spa_spare_avl);
1963	avl_destroy(&spa_l2cache_avl);
1964
1965	cv_destroy(&spa_namespace_cv);
1966	mutex_destroy(&spa_namespace_lock);
1967	mutex_destroy(&spa_spare_lock);
1968	mutex_destroy(&spa_l2cache_lock);
1969}
1970
1971/*
1972 * Return whether this pool has slogs. No locking needed.
1973 * It's not a problem if the wrong answer is returned as it's only for
1974 * performance and not correctness
1975 * performance and not correctness.
1976boolean_t
1977spa_has_slogs(spa_t *spa)
1978{
1979	return (spa->spa_log_class->mc_rotor != NULL);
1980}
1981
1982spa_log_state_t
1983spa_get_log_state(spa_t *spa)
1984{
1985	return (spa->spa_log_state);
1986}
1987
1988void
1989spa_set_log_state(spa_t *spa, spa_log_state_t state)
1990{
1991	spa->spa_log_state = state;
1992}
1993
1994boolean_t
1995spa_is_root(spa_t *spa)
1996{
1997	return (spa->spa_is_root);
1998}
1999
2000boolean_t
2001spa_writeable(spa_t *spa)
2002{
2003	return (!!(spa->spa_mode & FWRITE));
2004}
2005
2006/*
2007 * Returns true if there is a pending sync task in any of the current
2008 * syncing txg, the current quiescing txg, or the current open txg.
2009 */
2010boolean_t
2011spa_has_pending_synctask(spa_t *spa)
2012{
2013	return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
2014}
2015
2016int
2017spa_mode(spa_t *spa)
2018{
2019	return (spa->spa_mode);
2020}
2021
2022uint64_t
2023spa_bootfs(spa_t *spa)
2024{
2025	return (spa->spa_bootfs);
2026}
2027
2028uint64_t
2029spa_delegation(spa_t *spa)
2030{
2031	return (spa->spa_delegation);
2032}
2033
2034objset_t *
2035spa_meta_objset(spa_t *spa)
2036{
2037	return (spa->spa_meta_objset);
2038}
2039
2040enum zio_checksum
2041spa_dedup_checksum(spa_t *spa)
2042{
2043	return (spa->spa_dedup_checksum);
2044}
2045
2046/*
2047 * Reset pool scan stat per scan pass (or reboot).
2048 */
2049void
2050spa_scan_stat_init(spa_t *spa)
2051{
2052	/* data not stored on disk */
2053	spa->spa_scan_pass_start = gethrestime_sec();
2054	spa->spa_scan_pass_exam = 0;
2055	vdev_scan_stat_init(spa->spa_root_vdev);
2056}
2057
2058/*
2059 * Get scan stats for zpool status reports
2060 */
2061int
2062spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
2063{
2064	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
2065
2066	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
2067		return (SET_ERROR(ENOENT));
2068	bzero(ps, sizeof (pool_scan_stat_t));
2069
2070	/* data stored on disk */
2071	ps->pss_func = scn->scn_phys.scn_func;
2072	ps->pss_start_time = scn->scn_phys.scn_start_time;
2073	ps->pss_end_time = scn->scn_phys.scn_end_time;
2074	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
2075	ps->pss_examined = scn->scn_phys.scn_examined;
2076	ps->pss_to_process = scn->scn_phys.scn_to_process;
2077	ps->pss_processed = scn->scn_phys.scn_processed;
2078	ps->pss_errors = scn->scn_phys.scn_errors;
2079	ps->pss_state = scn->scn_phys.scn_state;
2080
2081	/* data not stored on disk */
2082	ps->pss_pass_start = spa->spa_scan_pass_start;
2083	ps->pss_pass_exam = spa->spa_scan_pass_exam;
2084
2085	return (0);
2086}
2087
2088boolean_t
2089spa_debug_enabled(spa_t *spa)
2090{
2091	return (spa->spa_debug);
2092}
2093
2094int
2095spa_maxblocksize(spa_t *spa)
2096{
2097	if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
2098		return (SPA_MAXBLOCKSIZE);
2099	else
2100		return (SPA_OLD_MAXBLOCKSIZE);
2101}
2102