spa_misc.c revision 260763
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/spa_boot.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include "zfs_prop.h"
#include "zfeature_common.h"

/*
 * SPA locking
 *
 * There are four basic locks for managing spa_t structures:
 *
 * spa_namespace_lock (global mutex)
 *
 *	This lock must be acquired to do any of the following:
 *
 *		- Lookup a spa_t by name
 *		- Add or remove a spa_t from the namespace
 *		- Increase spa_refcount from non-zero
 *		- Check if spa_refcount is zero
 *		- Rename a spa_t
 *		- add/remove/attach/detach devices
 *		- Held for the duration of create/destroy/import/export
 *
 *	It does not need to handle recursion.  A create or destroy may
 *	reference objects (files or zvols) in other pools, but by
 *	definition they must have an existing reference, and will never need
 *	to lookup a spa_t by name.
 *
 * spa_refcount (per-spa refcount_t protected by mutex)
 *
 *	This reference count keeps track of any active users of the spa_t.  The
 *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
 *	the refcount is never really 'zero' - opening a pool implicitly keeps
 *	some references in the DMU.  Internally we check against spa_minref, but
 *	present the image of a zero/non-zero value to consumers.
 *
 * spa_config_lock[] (per-spa array of rwlocks)
 *
 *	This protects the spa_t from config changes, and must be held in
 *	the following circumstances:
 *
 *		- RW_READER to perform I/O to the spa
 *		- RW_WRITER to change the vdev config
 *
 * The locking order is fairly straightforward:
 *
 *		spa_namespace_lock	->	spa_refcount
 *
 *	The namespace lock must be acquired to increase the refcount from 0
 *	or to check if it is zero.
 *
 *		spa_refcount		->	spa_config_lock[]
 *
 *	There must be at least one valid reference on the spa_t to acquire
 *	the config lock.
 *
 *		spa_namespace_lock	->	spa_config_lock[]
 *
 *	The namespace lock must always be taken before the config lock.
 *
 *
 * The spa_namespace_lock can be acquired directly and is globally visible.
 *
 * The namespace is manipulated using the following functions, all of which
 * require the spa_namespace_lock to be held.
 *
 *	spa_lookup()		Lookup a spa_t by name.
 *
 *	spa_add()		Create a new spa_t in the namespace.
 *
 *	spa_remove()		Remove a spa_t from the namespace.  This also
 *				frees up any memory associated with the spa_t.
 *
 *	spa_next()		Returns the next spa_t in the system, or the
 *				first if NULL is passed.
 *
 *	spa_evict_all()		Shutdown and remove all spa_t structures in
 *				the system.
 *
 *	spa_guid_exists()	Determine whether a pool/device guid exists.
 *
 * The spa_refcount is manipulated using the following functions:
 *
 *	spa_open_ref()		Adds a reference to the given spa_t.  Must be
 *				called with spa_namespace_lock held if the
 *				refcount is currently zero.
 *
 *	spa_close()		Remove a reference from the spa_t.  This will
 *				not free the spa_t or remove it from the
 *				namespace.  No locking is required.
 *
 *	spa_refcount_zero()	Returns true if the refcount is currently
 *				zero.  Must be called with spa_namespace_lock
 *				held.
 *
 * The spa_config_lock[] is an array of rwlocks, ordered as follows:
 * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
 * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
 *
 * To read the configuration, it suffices to hold one of these locks as reader.
 * To modify the configuration, you must hold all locks as writer.  To modify
 * vdev state without altering the vdev tree's topology (e.g. online/offline),
 * you must hold SCL_STATE and SCL_ZIO as writer.
 *
 * We use these distinct config locks to avoid recursive lock entry.
 * For example, spa_sync() (which holds SCL_CONFIG as reader) induces
 * block allocations (SCL_ALLOC), which may require reading space maps
 * from disk (dmu_read() -> zio_read() -> SCL_ZIO).
 *
 * The spa config locks cannot be normal rwlocks because we need the
 * ability to hand off ownership.  For example, SCL_ZIO is acquired
 * by the issuing thread and later released by an interrupt thread.
 * They do, however, obey the usual write-wanted semantics to prevent
 * writer (i.e. system administrator) starvation.
 *
 * The lock acquisition rules are as follows:
 *
 * SCL_CONFIG
 *	Protects changes to the vdev tree topology, such as vdev
 *	add/remove/attach/detach.  Protects the dirty config list
 *	(spa_config_dirty_list) and the set of spares and l2arc devices.
 *
 * SCL_STATE
 *	Protects changes to pool state and vdev state, such as vdev
 *	online/offline/fault/degrade/clear.  Protects the dirty state list
 *	(spa_state_dirty_list) and global pool state (spa_state).
 *
 * SCL_ALLOC
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_alloc() and metaslab_claim().
 *
 * SCL_ZIO
 *	Held by bp-level zios (those which have no io_vd upon entry)
 *	to prevent changes to the vdev tree.  The bp-level zio implicitly
 *	protects all of its vdev child zios, which do not hold SCL_ZIO.
 *
 * SCL_FREE
 *	Protects changes to metaslab groups and classes.
 *	Held as reader by metaslab_free().  SCL_FREE is distinct from
 *	SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
 *	blocks in zio_done() while another i/o that holds either
 *	SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
 *
 * SCL_VDEV
 *	Held as reader to prevent changes to the vdev tree during trivial
 *	inquiries such as bp_get_dsize().  SCL_VDEV is distinct from the
 *	other locks, and lower than all of them, to ensure that it's safe
 *	to acquire regardless of caller context.
 *
 * In addition, the following rules apply:
 *
 * (a)	spa_props_lock protects pool properties, spa_config and spa_config_list.
 *	The lock ordering is SCL_CONFIG > spa_props_lock.
 *
 * (b)	I/O operations on leaf vdevs.  For any zio operation that takes
 *	an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 *	or zio_write_phys() -- the caller must ensure that the config cannot
 *	change in the interim, and that the vdev cannot be reopened.
 *	SCL_STATE as reader suffices for both.
 *
 * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
 *
 *	spa_vdev_enter()	Acquire the namespace lock and the config lock
 *				for writing.
 *
 *	spa_vdev_exit()		Release the config lock, wait for all I/O
 *				to complete, sync the updated configs to the
 *				cache, and release the namespace lock.
 *
 * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
 * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
 * locking is, always, based on spa_namespace_lock and spa_config_lock[].
 *
 * spa_rename() is also implemented within this file since it requires
 * manipulation of the namespace.
 */
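/*
 * Usage sketch (illustrative): a caller that only needs a stable view of
 * the vdev tree takes the lowest lock, SCL_VDEV, as reader around the
 * inquiry, as bp_get_dsize() does below:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	(inspect the vdev tree)
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 */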

static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf and spa is on by default in debug builds */
int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA);
#else
int zfs_flags = 0;
#endif
SYSCTL_DECL(_debug);
TUNABLE_INT("debug.zfs_flags", &zfs_flags);
SYSCTL_INT(_debug, OID_AUTO, zfs_flags, CTLFLAG_RWTUN, &zfs_flags, 0,
    "ZFS debug flags.");

/*
 * zfs_recover can be set to nonzero to attempt to recover from
 * otherwise-fatal errors, typically caused by on-disk corruption.  When
 * set, calls to zfs_panic_recover() will turn into warning messages.
 */
int zfs_recover = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");

/*
 * Expiration time in milliseconds. This value has two meanings. First it is
 * used to determine when the spa_deadman() logic should fire. By default the
 * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds.
 * Secondly, the value determines if an I/O is considered "hung". Any I/O that
 * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
 * in a system panic.
 */
uint64_t zfs_deadman_synctime_ms = 1000000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_synctime_ms", &zfs_deadman_synctime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_synctime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_synctime_ms, 0,
    "Stalled ZFS I/O expiration time in milliseconds");

/*
 * Check time in milliseconds. This defines the frequency at which we check
 * for hung I/O.
 */
uint64_t zfs_deadman_checktime_ms = 5000ULL;
TUNABLE_QUAD("vfs.zfs.deadman_checktime_ms", &zfs_deadman_checktime_ms);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, deadman_checktime_ms, CTLFLAG_RDTUN,
    &zfs_deadman_checktime_ms, 0,
    "Period of checks for stalled ZFS I/O in milliseconds");
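
/*
 * Usage sketch (illustrative): the deadman knobs above are boot-time
 * tunables (CTLFLAG_RDTUN), so they would typically be set from
 * loader.conf; the values here are arbitrary examples, not
 * recommendations:
 *
 *	vfs.zfs.deadman_synctime_ms="600000"
 *	vfs.zfs.deadman_checktime_ms="10000"
 */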

/*
 * Default value of -1 for zfs_deadman_enabled is resolved in
 * zfs_deadman_init()
 */
int zfs_deadman_enabled = -1;
TUNABLE_INT("vfs.zfs.deadman_enabled", &zfs_deadman_enabled);
SYSCTL_INT(_vfs_zfs, OID_AUTO, deadman_enabled, CTLFLAG_RDTUN,
    &zfs_deadman_enabled, 0, "Kernel panic on stalled ZFS I/O");

/*
 * The worst case is single-sector max-parity RAID-Z blocks, in which
 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
 * times the size; so just assume that.  Add to this the fact that
 * we can have up to 3 DVAs per bp, and one more factor of 2 because
 * the block may be dittoed with up to 3 DVAs by ddt_sync().  All together,
 * the worst case is:
 *     (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
 */
int spa_asize_inflation = 24;

#ifndef illumos
#ifdef _KERNEL
static void
zfs_deadman_init()
{
	/*
	 * If we are not on i386 or amd64, or we are running in a virtual
	 * machine, disable the ZFS deadman thread by default.
	 */
	if (zfs_deadman_enabled == -1) {
#if defined(__amd64__) || defined(__i386__)
		zfs_deadman_enabled = (vm_guest == VM_GUEST_NO) ? 1 : 0;
#else
		zfs_deadman_enabled = 0;
#endif
	}
}
#endif	/* _KERNEL */
#endif	/* !illumos */

/*
 * ==========================================================================
 * SPA config locking
 * ==========================================================================
 */
static void
spa_config_lock_init(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
		refcount_create_untracked(&scl->scl_count);
		scl->scl_writer = NULL;
		scl->scl_write_wanted = 0;
	}
}

static void
spa_config_lock_destroy(spa_t *spa)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		mutex_destroy(&scl->scl_lock);
		cv_destroy(&scl->scl_cv);
		refcount_destroy(&scl->scl_count);
		ASSERT(scl->scl_writer == NULL);
		ASSERT(scl->scl_write_wanted == 0);
	}
}

int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			if (scl->scl_writer || scl->scl_write_wanted) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			if (!refcount_is_zero(&scl->scl_count)) {
				mutex_exit(&scl->scl_lock);
				spa_config_exit(spa, locks ^ (1 << i), tag);
				return (0);
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	return (1);
}

void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
	int wlocks_held = 0;

	ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (scl->scl_writer == curthread)
			wlocks_held |= (1 << i);
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		if (rw == RW_READER) {
			while (scl->scl_writer || scl->scl_write_wanted) {
				cv_wait(&scl->scl_cv, &scl->scl_lock);
			}
		} else {
			ASSERT(scl->scl_writer != curthread);
			while (!refcount_is_zero(&scl->scl_count)) {
				scl->scl_write_wanted++;
				cv_wait(&scl->scl_cv, &scl->scl_lock);
				scl->scl_write_wanted--;
			}
			scl->scl_writer = curthread;
		}
		(void) refcount_add(&scl->scl_count, tag);
		mutex_exit(&scl->scl_lock);
	}
	ASSERT(wlocks_held <= locks);
}

void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
	for (int i = SCL_LOCKS - 1; i >= 0; i--) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		mutex_enter(&scl->scl_lock);
		ASSERT(!refcount_is_zero(&scl->scl_count));
		if (refcount_remove(&scl->scl_count, tag) == 0) {
			ASSERT(scl->scl_writer == NULL ||
			    scl->scl_writer == curthread);
			scl->scl_writer = NULL;	/* OK in either case */
			cv_broadcast(&scl->scl_cv);
		}
		mutex_exit(&scl->scl_lock);
	}
}

int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
	int locks_held = 0;

	for (int i = 0; i < SCL_LOCKS; i++) {
		spa_config_lock_t *scl = &spa->spa_config_lock[i];
		if (!(locks & (1 << i)))
			continue;
		if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
		    (rw == RW_WRITER && scl->scl_writer == curthread))
			locks_held |= 1 << i;
	}

	return (locks_held);
}
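
/*
 * Usage sketch (illustrative): spa_config_held() is intended for ASSERTs
 * that document locking preconditions, as dva_get_dsize_sync() does
 * below:
 *
 *	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
 */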

/*
 * ==========================================================================
 * SPA namespace functions
 * ==========================================================================
 */

/*
 * Lookup the named spa_t in the AVL tree.  The spa_namespace_lock must be held.
 * Returns NULL if no matching spa_t is found.
 */
spa_t *
spa_lookup(const char *name)
{
	static spa_t search;	/* spa_t is large; don't allocate on stack */
	spa_t *spa;
	avl_index_t where;
	char *cp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));

	/*
	 * If it's a full dataset name, figure out the pool name and
	 * just use that.
	 */
	cp = strpbrk(search.spa_name, "/@");
	if (cp != NULL)
		*cp = '\0';

	spa = avl_find(&spa_namespace_avl, &search, &where);

	return (spa);
}
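
/*
 * Usage sketch (illustrative): a typical caller takes the namespace lock,
 * checks that the name is not already in use, and only then creates the
 * pool:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) == NULL)
 *		spa = spa_add(name, config, altroot);
 *	mutex_exit(&spa_namespace_lock);
 */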

/*
 * Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
 * If the zfs_deadman_enabled flag is set then it inspects all vdev queues
 * looking for potentially hung I/Os.
 */
void
spa_deadman(void *arg)
{
	spa_t *spa = arg;

	zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
	    (gethrtime() - spa->spa_sync_starttime) / NANOSEC,
	    ++spa->spa_deadman_calls);
	if (zfs_deadman_enabled)
		vdev_deadman(spa->spa_root_vdev);
}

/*
 * Create an uninitialized spa_t with the given name.  Requires
 * spa_namespace_lock.  The caller must ensure that the spa_t doesn't already
 * exist by calling spa_lookup() first.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
	spa_t *spa;
	spa_config_dirent_t *dp;
#ifdef illumos
	cyc_handler_t hdlr;
	cyc_time_t when;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_create(&spa->spa_free_bplist[t]);

	(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
	spa->spa_state = POOL_STATE_UNINITIALIZED;
	spa->spa_freeze_txg = UINT64_MAX;
	spa->spa_final_txg = UINT64_MAX;
	spa->spa_load_max_txg = UINT64_MAX;
	spa->spa_proc = &p0;
	spa->spa_proc_state = SPA_PROC_NONE;

#ifdef illumos
	hdlr.cyh_func = spa_deadman;
	hdlr.cyh_arg = spa;
	hdlr.cyh_level = CY_LOW_LEVEL;
#endif

	spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);

#ifdef illumos
	/*
	 * This determines how often we need to check for hung I/Os after
	 * the cyclic has already fired. Since checking for hung I/Os is
	 * an expensive operation we don't want to check too frequently.
	 * Instead wait for 5 seconds before checking again.
	 */
	when.cyt_interval = MSEC2NSEC(zfs_deadman_checktime_ms);
	when.cyt_when = CY_INFINITY;
	mutex_enter(&cpu_lock);
	spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
	mutex_exit(&cpu_lock);
#else	/* !illumos */
#ifdef _KERNEL
	callout_init(&spa->spa_deadman_cycid, CALLOUT_MPSAFE);
#endif
#endif
	refcount_create(&spa->spa_refcount);
	spa_config_lock_init(spa);

	avl_add(&spa_namespace_avl, spa);

	/*
	 * Set the alternate root, if there is one.
	 */
	if (altroot) {
		spa->spa_root = spa_strdup(altroot);
		spa_active_count++;
	}

	/*
	 * Every pool starts with the default cachefile
	 */
	list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
	    offsetof(spa_config_dirent_t, scd_link));

	dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
	dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
	list_insert_head(&spa->spa_config_list, dp);

	VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
	    KM_SLEEP) == 0);

	if (config != NULL) {
		nvlist_t *features;

		if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
		    &features) == 0) {
			VERIFY(nvlist_dup(features, &spa->spa_label_features,
			    0) == 0);
		}

		VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
	}

	if (spa->spa_label_features == NULL) {
		VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
		    KM_SLEEP) == 0);
	}

	spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);

	return (spa);
}

/*
 * Removes a spa_t from the namespace, freeing up any memory used.  Requires
 * spa_namespace_lock.  This is called only after the spa_t has been closed and
 * deactivated.
 */
void
spa_remove(spa_t *spa)
{
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	nvlist_free(spa->spa_config_splitting);

	avl_remove(&spa_namespace_avl, spa);
	cv_broadcast(&spa_namespace_cv);

	if (spa->spa_root) {
		spa_strfree(spa->spa_root);
		spa_active_count--;
	}

	while ((dp = list_head(&spa->spa_config_list)) != NULL) {
		list_remove(&spa->spa_config_list, dp);
		if (dp->scd_path != NULL)
			spa_strfree(dp->scd_path);
		kmem_free(dp, sizeof (spa_config_dirent_t));
	}

	list_destroy(&spa->spa_config_list);

	nvlist_free(spa->spa_label_features);
	nvlist_free(spa->spa_load_info);
	spa_config_set(spa, NULL);

#ifdef illumos
	mutex_enter(&cpu_lock);
	if (spa->spa_deadman_cycid != CYCLIC_NONE)
		cyclic_remove(spa->spa_deadman_cycid);
	mutex_exit(&cpu_lock);
	spa->spa_deadman_cycid = CYCLIC_NONE;
#else	/* !illumos */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	refcount_destroy(&spa->spa_refcount);

	spa_config_lock_destroy(spa);

	for (int t = 0; t < TXG_SIZE; t++)
		bplist_destroy(&spa->spa_free_bplist[t]);

	cv_destroy(&spa->spa_async_cv);
	cv_destroy(&spa->spa_proc_cv);
	cv_destroy(&spa->spa_scrub_io_cv);
	cv_destroy(&spa->spa_suspend_cv);

	mutex_destroy(&spa->spa_async_lock);
	mutex_destroy(&spa->spa_errlist_lock);
	mutex_destroy(&spa->spa_errlog_lock);
	mutex_destroy(&spa->spa_history_lock);
	mutex_destroy(&spa->spa_proc_lock);
	mutex_destroy(&spa->spa_props_lock);
	mutex_destroy(&spa->spa_scrub_lock);
	mutex_destroy(&spa->spa_suspend_lock);
	mutex_destroy(&spa->spa_vdev_top_lock);

	kmem_free(spa, sizeof (spa_t));
}

/*
 * Given a pool, return the next pool in the namespace, or NULL if there is
 * none.  If 'prev' is NULL, return the first pool.
 */
spa_t *
spa_next(spa_t *prev)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (prev)
		return (AVL_NEXT(&spa_namespace_avl, prev));
	else
		return (avl_first(&spa_namespace_avl));
}

/*
 * ==========================================================================
 * SPA refcount functions
 * ==========================================================================
 */

/*
 * Add a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_add(&spa->spa_refcount, tag);
}

/*
 * Remove a reference to the given spa_t.  Must have at least one reference, or
 * have the namespace lock held.
 */
void
spa_close(spa_t *spa, void *tag)
{
	ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
	    MUTEX_HELD(&spa_namespace_lock));
	(void) refcount_remove(&spa->spa_refcount, tag);
}

/*
 * Check to see if the spa refcount is zero.  Must be called with
 * spa_namespace_lock held.  We really compare against spa_minref, which is the
 * number of references acquired when opening a pool.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
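
/*
 * Usage sketch (illustrative): references are taken and dropped with a
 * matching tag, typically FTAG:
 *
 *	spa_open_ref(spa, FTAG);
 *	(use the spa_t)
 *	spa_close(spa, FTAG);
 */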

/*
 * ==========================================================================
 * SPA spare and l2cache tracking
 * ==========================================================================
 */

/*
 * Hot spares and cache devices are tracked using the same code below,
 * for 'auxiliary' devices.
 */

typedef struct spa_aux {
	uint64_t	aux_guid;
	uint64_t	aux_pool;
	avl_node_t	aux_avl;
	int		aux_count;
} spa_aux_t;

static int
spa_aux_compare(const void *a, const void *b)
{
	const spa_aux_t *sa = a;
	const spa_aux_t *sb = b;

	if (sa->aux_guid < sb->aux_guid)
		return (-1);
	else if (sa->aux_guid > sb->aux_guid)
		return (1);
	else
		return (0);
}

void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
	avl_index_t where;
	spa_aux_t search;
	spa_aux_t *aux;

	search.aux_guid = vd->vdev_guid;
	if ((aux = avl_find(avl, &search, &where)) != NULL) {
		aux->aux_count++;
	} else {
		aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
		aux->aux_guid = vd->vdev_guid;
		aux->aux_count = 1;
		avl_insert(avl, aux, where);
	}
}

void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search;
	spa_aux_t *aux;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	aux = avl_find(avl, &search, &where);

	ASSERT(aux != NULL);

	if (--aux->aux_count == 0) {
		avl_remove(avl, aux);
		kmem_free(aux, sizeof (spa_aux_t));
	} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
		aux->aux_pool = 0ULL;
	}
}

boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
	spa_aux_t search, *found;

	search.aux_guid = guid;
	found = avl_find(avl, &search, NULL);

	if (pool) {
		if (found)
			*pool = found->aux_pool;
		else
			*pool = 0ULL;
	}

	if (refcnt) {
		if (found)
			*refcnt = found->aux_count;
		else
			*refcnt = 0;
	}

	return (found != NULL);
}

void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
	spa_aux_t search, *found;
	avl_index_t where;

	search.aux_guid = vd->vdev_guid;
	found = avl_find(avl, &search, &where);
	ASSERT(found != NULL);
	ASSERT(found->aux_pool == 0ULL);

	found->aux_pool = spa_guid(vd->vdev_spa);
}

/*
 * Spares are tracked globally due to the following constraints:
 *
 * 	- A spare may be part of multiple pools.
 * 	- A spare may be added to a pool even if it's actively in use within
 *	  another pool.
 * 	- A spare in use in any pool can only be the source of a replacement if
 *	  the target is a spare in the same pool.
 *
 * We keep track of all spares on the system through the use of a reference
 * counted AVL tree.  When a vdev is added as a spare, or used as a replacement
 * spare, then we bump the reference count in the AVL tree.  In addition, we set
 * the 'vdev_isspare' member to indicate that the device is a spare (active or
 * inactive).  When a spare is made active (used to replace a device in the
 * pool), we also keep track of which pool it's been made a part of.
 *
 * The 'spa_spare_lock' protects the AVL tree.  These functions are normally
 * called under the spa_namespace lock as part of vdev reconfiguration.  The
 * separate spare lock exists for the status query path, which does not need to
 * be completely consistent with respect to other vdev configuration changes.
 */

static int
spa_spare_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_spare_add(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(!vd->vdev_isspare);
	spa_aux_add(vd, &spa_spare_avl);
	vd->vdev_isspare = B_TRUE;
	mutex_exit(&spa_spare_lock);
}

void
spa_spare_remove(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_remove(vd, &spa_spare_avl);
	vd->vdev_isspare = B_FALSE;
	mutex_exit(&spa_spare_lock);
}

boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
	boolean_t found;

	mutex_enter(&spa_spare_lock);
	found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);

	return (found);
}

void
spa_spare_activate(vdev_t *vd)
{
	mutex_enter(&spa_spare_lock);
	ASSERT(vd->vdev_isspare);
	spa_aux_activate(vd, &spa_spare_avl);
	mutex_exit(&spa_spare_lock);
}

/*
 * Level 2 ARC devices are tracked globally for the same reasons as spares.
 * Cache devices currently only support one pool per cache device, and so
 * for these devices the aux reference count is currently unused beyond 1.
 */

static int
spa_l2cache_compare(const void *a, const void *b)
{
	return (spa_aux_compare(a, b));
}

void
spa_l2cache_add(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(!vd->vdev_isl2cache);
	spa_aux_add(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_TRUE;
	mutex_exit(&spa_l2cache_lock);
}

void
spa_l2cache_remove(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_remove(vd, &spa_l2cache_avl);
	vd->vdev_isl2cache = B_FALSE;
	mutex_exit(&spa_l2cache_lock);
}

boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
	boolean_t found;

	mutex_enter(&spa_l2cache_lock);
	found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);

	return (found);
}

void
spa_l2cache_activate(vdev_t *vd)
{
	mutex_enter(&spa_l2cache_lock);
	ASSERT(vd->vdev_isl2cache);
	spa_aux_activate(vd, &spa_l2cache_avl);
	mutex_exit(&spa_l2cache_lock);
}

/*
 * ==========================================================================
 * SPA vdev locking
 * ==========================================================================
 */

/*
 * Lock the given spa_t for the purpose of adding or removing a vdev.
 * Grabs the global spa_namespace_lock plus the spa config lock for writing.
 * It returns the next transaction group for the spa_t.
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	return (spa_vdev_config_enter(spa));
}

/*
 * Internal implementation for spa_vdev_enter().  Used when a vdev
 * operation requires multiple syncs (i.e. removing a device) while
 * keeping the spa_namespace_lock held.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

	return (spa_last_synced_txg(spa) + 1);
}

/*
 * Used in combination with spa_vdev_config_enter() to allow the syncing
 * of multiple transactions without releasing the spa_namespace_lock.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	int config_changed = B_FALSE;

	ASSERT(txg > spa_last_synced_txg(spa));

	spa->spa_pending_vdev = NULL;

	/*
	 * Reassess the DTLs.
	 */
	vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

	if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	/*
	 * Verify the metaslab classes.
	 */
	ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
	ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

	spa_config_exit(spa, SCL_ALL, spa);

	/*
	 * Panic the system if the specified tag requires it.  This
	 * is useful for ensuring that configurations are updated
	 * transactionally.
	 */
	if (zio_injection_enabled)
		zio_handle_panic_injection(spa, tag, 0);

	/*
	 * Note: this txg_wait_synced() is important because it ensures
	 * that there won't be more than one config change per txg.
	 * This allows us to use the txg as the generation number.
	 */
	if (error == 0)
		txg_wait_synced(spa->spa_dsl_pool, txg);

	if (vd != NULL) {
		ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
		spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
		vdev_free(vd);
		spa_config_exit(spa, SCL_ALL, spa);
	}

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed)
		spa_config_sync(spa, B_FALSE, B_TRUE);
}

/*
 * Unlock the spa_t after adding or removing a vdev.  Besides undoing the
 * locking of spa_vdev_enter(), we also want to make sure the transactions have
 * synced to disk, and then update the global configuration cache with the new
 * information.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
	spa_vdev_config_exit(spa, vd, txg, error, FTAG);
	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
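
/*
 * Usage sketch (illustrative): vdev add/remove operations follow this
 * pattern:
 *
 *	uint64_t txg = spa_vdev_enter(spa);
 *	(modify the vdev tree)
 *	return (spa_vdev_exit(spa, vd, txg, error));
 */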

/*
 * Lock the given spa_t for the purpose of changing vdev state.
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
	int locks = SCL_STATE_ALL | oplocks;

	/*
	 * Root pools may need to read from the underlying devfs filesystem
	 * when opening up a vdev.  Unfortunately if we're holding the
	 * SCL_ZIO lock it will result in a deadlock when we try to issue
	 * the read from the root filesystem.  Instead we "prefetch"
	 * the associated vnodes that we need prior to opening the
	 * underlying devices and cache them so that we can prevent
	 * any I/O when we are doing the actual open.
	 */
	if (spa_is_root(spa)) {
		int low = locks & ~(SCL_ZIO - 1);
		int high = locks & ~low;

		spa_config_enter(spa, high, spa, RW_WRITER);
		vdev_hold(spa->spa_root_vdev);
		spa_config_enter(spa, low, spa, RW_WRITER);
	} else {
		spa_config_enter(spa, locks, spa, RW_WRITER);
	}
	spa->spa_vdev_locks = locks;
}

int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
	boolean_t config_changed = B_FALSE;

	if (vd != NULL || error == 0)
		vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
		    0, 0, B_FALSE);

	if (vd != NULL) {
		vdev_state_dirty(vd->vdev_top);
		config_changed = B_TRUE;
		spa->spa_config_generation++;
	}

	if (spa_is_root(spa))
		vdev_rele(spa->spa_root_vdev);

	ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
	spa_config_exit(spa, spa->spa_vdev_locks, spa);

	/*
	 * If anything changed, wait for it to sync.  This ensures that,
	 * from the system administrator's perspective, zpool(1M) commands
	 * are synchronous.  This is important for things like zpool offline:
	 * when the command completes, you expect no further I/O from ZFS.
	 */
	if (vd != NULL)
		txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * If the config changed, update the config cache.
	 */
	if (config_changed) {
		mutex_enter(&spa_namespace_lock);
		spa_config_sync(spa, B_FALSE, B_TRUE);
		mutex_exit(&spa_namespace_lock);
	}

	return (error);
}
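
/*
 * Usage sketch (illustrative): vdev state changes such as online/offline
 * follow this pattern:
 *
 *	spa_vdev_state_enter(spa, SCL_NONE);
 *	(change the vdev's state)
 *	return (spa_vdev_state_exit(spa, vd, error));
 */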

/*
 * ==========================================================================
 * Miscellaneous functions
 * ==========================================================================
 */

void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_add_boolean(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
	(void) nvlist_remove_all(spa->spa_label_features, feature);
	vdev_config_dirty(spa->spa_root_vdev);
}

/*
 * Rename a spa_t.
 */
int
spa_rename(const char *name, const char *newname)
{
	spa_t *spa;
	int err;

	/*
	 * Lookup the spa_t and grab the config lock for writing.  We need to
	 * actually open the pool so that we can sync out the necessary labels.
	 * It's OK to call spa_open() with the namespace lock held because we
	 * allow recursive calls for other reasons.
	 */
	mutex_enter(&spa_namespace_lock);
	if ((err = spa_open(name, &spa, FTAG)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	avl_remove(&spa_namespace_avl, spa);
	(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
	avl_add(&spa_namespace_avl, spa);

	/*
	 * Sync all labels to disk with the new names by marking the root vdev
	 * dirty and waiting for it to sync.  It will pick up the new pool name
	 * during the sync.
	 */
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa->spa_dsl_pool, 0);

	/*
	 * Sync the updated config cache.
	 */
	spa_config_sync(spa, B_FALSE, B_TRUE);

	spa_close(spa, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (0);
}

/*
 * Return the spa_t associated with given pool_guid, if it exists.  If
 * device_guid is non-zero, determine whether the pool exists *and* contains
 * a device with the specified device_guid.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
	spa_t *spa;
	avl_tree_t *t = &spa_namespace_avl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
		if (spa->spa_state == POOL_STATE_UNINITIALIZED)
			continue;
		if (spa->spa_root_vdev == NULL)
			continue;
		if (spa_guid(spa) == pool_guid) {
			if (device_guid == 0)
				break;

			if (vdev_lookup_by_guid(spa->spa_root_vdev,
			    device_guid) != NULL)
				break;

			/*
			 * Check any devices we may be in the process of adding.
			 */
			if (spa->spa_pending_vdev) {
				if (vdev_lookup_by_guid(spa->spa_pending_vdev,
				    device_guid) != NULL)
					break;
			}
		}
	}

	return (spa);
}

/*
 * Determine whether a pool with the given pool_guid exists.
 */
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
	return (spa_by_guid(pool_guid, device_guid) != NULL);
}

char *
spa_strdup(const char *s)
{
	size_t len;
	char *new;

	len = strlen(s);
	new = kmem_alloc(len + 1, KM_SLEEP);
	bcopy(s, new, len);
	new[len] = '\0';

	return (new);
}

void
spa_strfree(char *s)
{
	kmem_free(s, strlen(s) + 1);
}

uint64_t
spa_get_random(uint64_t range)
{
	uint64_t r;

	ASSERT(range != 0);

	(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));

	return (r % range);
}

uint64_t
spa_generate_guid(spa_t *spa)
{
	uint64_t guid = spa_get_random(-1ULL);

	if (spa != NULL) {
		while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
			guid = spa_get_random(-1ULL);
	} else {
		while (guid == 0 || spa_guid_exists(guid, 0))
			guid = spa_get_random(-1ULL);
	}

	return (guid);
}

void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}

void
spa_freeze(spa_t *spa)
{
	uint64_t freeze_txg = 0;

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	if (spa->spa_freeze_txg == UINT64_MAX) {
		freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
		spa->spa_freeze_txg = freeze_txg;
	}
	spa_config_exit(spa, SCL_ALL, FTAG);
	if (freeze_txg != 0)
		txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}

void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}

/*
 * This is a stripped-down version of strtoull, suitable only for converting
 * lowercase hexadecimal numbers that don't overflow.
 */
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
	uint64_t val = 0;
	char c;
	int digit;

	while ((c = *str) != '\0') {
		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = 10 + c - 'a';
		else
			break;

		val *= 16;
		val += digit;

		str++;
	}

	if (nptr)
		*nptr = (char *)str;

	return (val);
}
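
/*
 * Example (illustrative): zfs_strtonum("1a2b", &end) returns 0x1a2b and
 * leaves end pointing at the terminating NUL; parsing stops at the first
 * character outside [0-9a-f].
 */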

/*
 * ==========================================================================
 * Accessor functions
 * ==========================================================================
 */

boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}

void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
	if (spa->spa_root == NULL)
		buf[0] = '\0';
	else
		(void) strncpy(buf, spa->spa_root, buflen);
}

int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}

uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}

uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}

/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	return (lsize * spa_asize_inflation);
}

uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}

/*
 * Return the failure mode that has been set for this pool.  The default
 * behavior will be to block all I/Os when a complete failure occurs.
 */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}

uint64_t
spa_deadman_synctime(spa_t *spa)
{
	return (spa->spa_deadman_synctime);
}

uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}

uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	return (dsize);
}

uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
	uint64_t dsize = 0;

	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);

	for (int d = 0; d < SPA_DVAS_PER_BP; d++)
		dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);

	spa_config_exit(spa, SCL_VDEV, FTAG);

	return (dsize);
}

/*
 * ==========================================================================
 * Initialization and Termination
 * ==========================================================================
 */

static int
spa_name_compare(const void *a1, const void *a2)
{
	const spa_t *s1 = a1;
	const spa_t *s2 = a2;
	int s;

	s = strcmp(s1->spa_name, s2->spa_name);
	if (s > 0)
		return (1);
	if (s < 0)
		return (-1);
	return (0);
}

int
spa_busy(void)
{
	return (spa_active_count);
}

void
spa_boot_init()
{
	spa_config_load();
}

#ifdef _KERNEL
EVENTHANDLER_DEFINE(mountroot, spa_boot_init, NULL, 0);
#endif

void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifdef _KERNEL
	spa_arch_init();
#else
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif /* illumos */
	refcount_sysinit();
	unique_init();
	space_map_init();
	zio_init();
	lz4_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
#ifndef illumos
#ifdef _KERNEL
	zfs_deadman_init();
#endif
#endif	/* !illumos */
}

void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	lz4_fini();
	zio_fini();
	space_map_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}

/*
 * Return whether this pool has slogs. No locking needed.
 * It's not a problem if the wrong answer is returned as it's only for
 * performance and not correctness.
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}

/*
 * Reset pool scan stat per scan pass (or reboot).
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}

/*
 * Get scan stats for zpool status reports
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (SET_ERROR(ENOENT));
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}

boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}
1909