/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

/*
 * Pool import support functions.
 *
 * To import a pool, we rely on reading the configuration information from the
 * ZFS label of each device.  If we successfully read the label, then we
 * organize the configuration information in the following hierarchy:
 *
 * 	pool guid -> toplevel vdev guid -> label txg
 *
 * Duplicate entries matching this same tuple will be discarded.  Once we have
 * examined every device, we pick the best label txg config for each toplevel
 * vdev.  We then arrange these toplevel vdevs into a complete pool config, and
 * update any paths that have changed.  Finally, we attempt to import the pool
 * using our derived config, and record the results.
 */

#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <thread_pool.h>
#include <libgeom.h>

#include <sys/vdev_impl.h>

#include "libzfs.h"
#include "libzfs_impl.h"

/*
 * Intermediate structures used to gather configuration information.
 */
typedef struct config_entry {
	uint64_t		ce_txg;
	nvlist_t		*ce_config;
	struct config_entry	*ce_next;
} config_entry_t;

typedef struct vdev_entry {
	uint64_t		ve_guid;
	config_entry_t		*ve_configs;
	struct vdev_entry	*ve_next;
} vdev_entry_t;

typedef struct pool_entry {
	uint64_t		pe_guid;
	vdev_entry_t		*pe_vdevs;
	struct pool_entry	*pe_next;
} pool_entry_t;

typedef struct name_entry {
	char			*ne_name;
	uint64_t		ne_guid;
	struct name_entry	*ne_next;
} name_entry_t;

typedef struct pool_list {
	pool_entry_t		*pools;
	name_entry_t		*names;
} pool_list_t;
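
/*
 * How the intermediate lists nest: each pool entry holds the top-level
 * vdevs seen for that pool guid, and each top-level vdev holds one config
 * per label txg observed.  The separate name list maps leaf vdev guids
 * back to the device paths they were read from:
 *
 *	pool_list_t
 *	    pools -> pool_entry_t (pe_guid)
 *	                 pe_vdevs -> vdev_entry_t (ve_guid)
 *	                                 ve_configs -> config_entry_t (ce_txg)
 *	    names -> name_entry_t (ne_guid, ne_name)
 */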

static char *
get_devid(const char *path)
{
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
}

/*
 * Go through and fix up any path and/or devid information for the given vdev
 * configuration.
 */
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
	nvlist_t **child;
	uint_t c, children;
	uint64_t guid;
	name_entry_t *ne, *best;
	char *path, *devid;
	int matched;

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			if (fix_paths(child[c], names) != 0)
				return (-1);
		return (0);
	}

	/*
	 * This is a leaf (file or disk) vdev.  In either case, go through
	 * the name list and see if we find a matching guid.  If so, replace
	 * the path and see if we can calculate a new devid.
	 *
	 * There may be multiple names associated with a particular guid, in
	 * which case we have overlapping slices or multiple paths to the same
	 * disk.  If this is the case, then we want to pick the path that is
	 * the most similar to the original, where "most similar" is the number
	 * of matching characters starting from the end of the path.  This will
	 * preserve slice numbers even if the disks have been reorganized, and
	 * will also catch preferred disk names if multiple paths exist.
	 */
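	/*
	 * For example (hypothetical paths): if the label recorded
	 * "/dev/da0p2" and the name list offers "/dev/da3p2" and
	 * "/dev/da3p1" for this guid, we pick "/dev/da3p2", since it
	 * matches two trailing characters ("p2") instead of none.
	 */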
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		path = NULL;

	matched = 0;
	best = NULL;
	for (ne = names; ne != NULL; ne = ne->ne_next) {
		if (ne->ne_guid == guid) {
			const char *src, *dst;
			int count;

			if (path == NULL) {
				best = ne;
				break;
			}

			src = ne->ne_name + strlen(ne->ne_name) - 1;
			dst = path + strlen(path) - 1;
			for (count = 0; src >= ne->ne_name && dst >= path;
			    src--, dst--, count++)
				if (*src != *dst)
					break;

			/*
			 * At this point, 'count' is the number of characters
			 * matched from the end.
			 */
			if (count > matched || best == NULL) {
				best = ne;
				matched = count;
			}
		}
	}

	if (best == NULL)
		return (0);

	if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
		return (-1);

	if ((devid = get_devid(best->ne_name)) == NULL) {
		(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
	} else {
		if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
			return (-1);
		devid_str_free(devid);
	}

	return (0);
}

/*
 * Add the given configuration to the list of known devices.
 */
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
    nvlist_t *config)
{
	uint64_t pool_guid, vdev_guid, top_guid, txg, state;
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	name_entry_t *ne;

	/*
	 * If this is a hot spare not currently in use or a level 2 cache
	 * device, add it to the list of names to translate, but don't do
	 * anything else.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
	    &state) == 0 &&
	    (state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
		if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
			return (-1);

		if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
			free(ne);
			return (-1);
		}
		ne->ne_guid = vdev_guid;
		ne->ne_next = pl->names;
		pl->names = ne;
		return (0);
	}

	/*
	 * If we have a valid config but cannot read any of these fields, then
	 * it means we have a half-initialized label.  In vdev_label_init()
	 * we write a label with txg == 0 so that we can identify the device
	 * in case the user refers to the same disk later on.  If we fail to
	 * create the pool, we'll be left with a label in this state
	 * which should not be considered part of a valid pool.
	 */
	if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &pool_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
	    &vdev_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
	    &top_guid) != 0 ||
	    nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
	    &txg) != 0 || txg == 0) {
		nvlist_free(config);
		return (0);
	}

	/*
	 * First, see if we know about this pool.  If not, then add it to the
	 * list of known pools.
	 */
	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		if (pe->pe_guid == pool_guid)
			break;
	}

	if (pe == NULL) {
		if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		pe->pe_guid = pool_guid;
		pe->pe_next = pl->pools;
		pl->pools = pe;
	}

	/*
	 * Second, see if we know about this toplevel vdev.  Add it if it's
	 * missing.
	 */
	for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
		if (ve->ve_guid == top_guid)
			break;
	}

	if (ve == NULL) {
		if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ve->ve_guid = top_guid;
		ve->ve_next = pe->pe_vdevs;
		pe->pe_vdevs = ve;
	}

	/*
	 * Third, see if we have a config with a matching transaction group.  If
	 * so, then we do nothing.  Otherwise, add it to the list of known
	 * configs.
	 */
	for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
		if (ce->ce_txg == txg)
			break;
	}

	if (ce == NULL) {
		if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
			nvlist_free(config);
			return (-1);
		}
		ce->ce_txg = txg;
		ce->ce_config = config;
		ce->ce_next = ve->ve_configs;
		ve->ve_configs = ce;
	} else {
		nvlist_free(config);
	}

	/*
	 * At this point we've successfully added our config to the list of
	 * known configs.  The last thing to do is add the vdev guid -> path
	 * mappings so that we can fix up the configuration as necessary before
	 * doing the import.
	 */
	if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
		return (-1);

	if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
		free(ne);
		return (-1);
	}

	ne->ne_guid = vdev_guid;
	ne->ne_next = pl->names;
	pl->names = ne;

	return (0);
}

/*
 * Returns true if the named pool matches the given GUID.
 */
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
    boolean_t *isactive)
{
	zpool_handle_t *zhp;
	uint64_t theguid;

	if (zpool_open_silent(hdl, name, &zhp) != 0)
		return (-1);

	if (zhp == NULL) {
		*isactive = B_FALSE;
		return (0);
	}

	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
	    &theguid) == 0);

	zpool_close(zhp);

	*isactive = (theguid == guid);
	return (0);
}

static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
	nvlist_t *nvl;
	zfs_cmd_t zc = { 0 };
	int err;

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
		return (NULL);

	if (zcmd_alloc_dst_nvlist(hdl, &zc,
	    zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

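	/*
	 * The kernel cannot tell us the required buffer size up front, so
	 * keep retrying the tryimport ioctl, growing the destination
	 * nvlist buffer each time it fails with ENOMEM.
	 */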
	while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
	    &zc)) != 0 && errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (NULL);
		}
	}

	if (err) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
		zcmd_free_nvlists(&zc);
		return (NULL);
	}

	zcmd_free_nvlists(&zc);
	return (nvl);
}

/*
 * Determine if the vdev id is a hole in the namespace.
 */
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
	for (int c = 0; c < holes; c++) {

		/* Top-level is a hole */
		if (hole_array[c] == id)
			return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * Convert our list of pools into the definitive set of configurations.  We
 * start by picking the best config for each toplevel vdev.  Once that's done,
 * we assemble the toplevel vdevs into a full config for the pool.  We make a
 * pass to fix up any incorrect paths, and then add it to the main list to
 * return to the user.
 */
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
	pool_entry_t *pe;
	vdev_entry_t *ve;
	config_entry_t *ce;
	nvlist_t *ret = NULL, *config = NULL, *tmp, *nvtop, *nvroot;
	nvlist_t **spares, **l2cache;
	uint_t i, nspares, nl2cache;
	boolean_t config_seen;
	uint64_t best_txg;
	char *name, *hostname;
	uint64_t guid;
	uint_t children = 0;
	nvlist_t **child = NULL;
	uint_t holes;
	uint64_t *hole_array, max_id;
	uint_t c;
	boolean_t isactive;
	uint64_t hostid;
	nvlist_t *nvl;
	boolean_t found_one = B_FALSE;
	boolean_t valid_top_config = B_FALSE;

	if (nvlist_alloc(&ret, 0, 0) != 0)
		goto nomem;

	for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
		uint64_t id, max_txg = 0;

		if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		config_seen = B_FALSE;

		/*
		 * Iterate over all toplevel vdevs.  Grab the pool configuration
		 * from the first one we find, and then go through the rest and
		 * add them as necessary to the 'vdevs' member of the config.
		 */
		for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {

			/*
			 * Determine the best configuration for this vdev by
			 * selecting the config with the latest transaction
			 * group.
			 */
			best_txg = 0;
			for (ce = ve->ve_configs; ce != NULL;
			    ce = ce->ce_next) {

				if (ce->ce_txg > best_txg) {
					tmp = ce->ce_config;
					best_txg = ce->ce_txg;
				}
			}

			/*
			 * We rely on the fact that the max txg for the
			 * pool will contain the most up-to-date information
			 * about the valid top-levels in the vdev namespace.
			 */
			if (best_txg > max_txg) {
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_VDEV_CHILDREN,
				    DATA_TYPE_UINT64);
				(void) nvlist_remove(config,
				    ZPOOL_CONFIG_HOLE_ARRAY,
				    DATA_TYPE_UINT64_ARRAY);

				max_txg = best_txg;
				hole_array = NULL;
				holes = 0;
				max_id = 0;
				valid_top_config = B_FALSE;

				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
					verify(nvlist_add_uint64(config,
					    ZPOOL_CONFIG_VDEV_CHILDREN,
					    max_id) == 0);
					valid_top_config = B_TRUE;
				}

				if (nvlist_lookup_uint64_array(tmp,
				    ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
				    &holes) == 0) {
					verify(nvlist_add_uint64_array(config,
					    ZPOOL_CONFIG_HOLE_ARRAY,
					    hole_array, holes) == 0);
				}
			}

			if (!config_seen) {
				/*
				 * Copy the relevant pieces of data to the pool
				 * configuration:
				 *
				 *	version
				 *	pool guid
				 *	name
				 *	comment (if available)
				 *	pool state
				 *	hostid (if available)
				 *	hostname (if available)
				 */
				uint64_t state, version;
				char *comment = NULL;

				version = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_VERSION);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_VERSION, version);
				guid = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_GUID);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_GUID, guid);
				name = fnvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_POOL_NAME);
				fnvlist_add_string(config,
				    ZPOOL_CONFIG_POOL_NAME, name);

				if (nvlist_lookup_string(tmp,
				    ZPOOL_CONFIG_COMMENT, &comment) == 0)
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_COMMENT, comment);

				state = fnvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_POOL_STATE);
				fnvlist_add_uint64(config,
				    ZPOOL_CONFIG_POOL_STATE, state);

				hostid = 0;
				if (nvlist_lookup_uint64(tmp,
				    ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
					fnvlist_add_uint64(config,
					    ZPOOL_CONFIG_HOSTID, hostid);
					hostname = fnvlist_lookup_string(tmp,
					    ZPOOL_CONFIG_HOSTNAME);
					fnvlist_add_string(config,
					    ZPOOL_CONFIG_HOSTNAME, hostname);
				}

				config_seen = B_TRUE;
			}

			/*
			 * Add this top-level vdev to the child array.
			 */
			verify(nvlist_lookup_nvlist(tmp,
			    ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
			verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
			    &id) == 0);

			if (id >= children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (id + 1) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = id + 1;
			}
			if (nvlist_dup(nvtop, &child[id], 0) != 0)
				goto nomem;
		}

		/*
		 * If we have information about all the top-levels then
		 * clean up the nvlist which we've constructed. This
		 * means removing any extraneous devices that are
		 * beyond the valid range or adding devices to the end
		 * of our array which appear to be missing.
		 */
		if (valid_top_config) {
			if (max_id < children) {
				for (c = max_id; c < children; c++)
					nvlist_free(child[c]);
				children = max_id;
			} else if (max_id > children) {
				nvlist_t **newchild;

				newchild = zfs_alloc(hdl, (max_id) *
				    sizeof (nvlist_t *));
				if (newchild == NULL)
					goto nomem;

				for (c = 0; c < children; c++)
					newchild[c] = child[c];

				free(child);
				child = newchild;
				children = max_id;
			}
		}

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		/*
		 * The vdev namespace may contain holes as a result of
		 * device removal. We must add them back into the vdev
		 * tree before we process any missing devices.
		 */
		if (holes > 0) {
			ASSERT(valid_top_config);

			for (c = 0; c < children; c++) {
				nvlist_t *holey;

				if (child[c] != NULL ||
				    !vdev_is_hole(hole_array, holes, c))
					continue;

				if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;

				/*
				 * Holes in the namespace are treated as
				 * "hole" top-level vdevs and have a
				 * special flag set on them.
				 */
				if (nvlist_add_string(holey,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_HOLE) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(holey,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0)
					goto nomem;
				child[c] = holey;
			}
		}

		/*
		 * Look for any missing top-level vdevs.  If this is the case,
		 * create a faked up 'missing' vdev as a placeholder.  We cannot
		 * simply compress the child array, because the kernel performs
		 * certain checks to make sure the vdev IDs match their location
		 * in the configuration.
		 */
		for (c = 0; c < children; c++) {
			if (child[c] == NULL) {
				nvlist_t *missing;
				if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
				    0) != 0)
					goto nomem;
				if (nvlist_add_string(missing,
				    ZPOOL_CONFIG_TYPE,
				    VDEV_TYPE_MISSING) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_ID, c) != 0 ||
				    nvlist_add_uint64(missing,
				    ZPOOL_CONFIG_GUID, 0ULL) != 0) {
					nvlist_free(missing);
					goto nomem;
				}
				child[c] = missing;
			}
		}

		/*
		 * Put all of this pool's top-level vdevs into a root vdev.
		 */
		if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
			goto nomem;
		if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
		    VDEV_TYPE_ROOT) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
		    nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
		    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
		    child, children) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		for (c = 0; c < children; c++)
			nvlist_free(child[c]);
		free(child);
		children = 0;
		child = NULL;

		/*
		 * Go through and fix up any paths and/or devids based on our
		 * known list of vdev GUID -> path mappings.
		 */
		if (fix_paths(nvroot, pl->names) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}

		/*
		 * Add the root vdev to this pool's configuration.
		 */
		if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    nvroot) != 0) {
			nvlist_free(nvroot);
			goto nomem;
		}
		nvlist_free(nvroot);

		/*
		 * zdb uses this path to report on active pools that were
		 * imported or created using -R.
		 */
		if (active_ok)
			goto add_pool;

		/*
		 * Determine if this pool is currently active, in which case we
		 * can't actually import it.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &guid) == 0);

		if (pool_active(hdl, name, guid, &isactive) != 0)
			goto error;

		if (isactive) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		if ((nvl = refresh_config(hdl, config)) == NULL) {
			nvlist_free(config);
			config = NULL;
			continue;
		}

		nvlist_free(config);
		config = nvl;

		/*
		 * Go through and update the paths for spares, now that we have
		 * them.
		 */
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
		    &spares, &nspares) == 0) {
			for (i = 0; i < nspares; i++) {
				if (fix_paths(spares[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Update the paths for l2cache devices.
		 */
		if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
		    &l2cache, &nl2cache) == 0) {
			for (i = 0; i < nl2cache; i++) {
				if (fix_paths(l2cache[i], pl->names) != 0)
					goto nomem;
			}
		}

		/*
		 * Restore the original information read from the actual label.
		 */
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
		    DATA_TYPE_UINT64);
		(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
		    DATA_TYPE_STRING);
		if (hostid != 0) {
			verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
			    hostid) == 0);
			verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
			    hostname) == 0);
		}

add_pool:
		/*
		 * Add this pool to the list of configs.
		 */
		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (nvlist_add_nvlist(ret, name, config) != 0)
			goto nomem;

		found_one = B_TRUE;
		nvlist_free(config);
		config = NULL;
	}

	if (!found_one) {
		nvlist_free(ret);
		ret = NULL;
	}

	return (ret);

nomem:
	(void) no_memory(hdl);
error:
	nvlist_free(config);
	nvlist_free(ret);
	for (c = 0; c < children; c++)
		nvlist_free(child[c]);
	free(child);

	return (NULL);
}

/*
 * Return the offset of the given label.
 */
static uint64_t
label_offset(uint64_t size, int l)
{
	ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
	return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
	    0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
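
/*
 * A worked example: with the usual 256 KB vdev_label_t and VDEV_LABELS
 * of four, a device of aligned size 'size' keeps labels 0 and 1 at
 * offsets 0 and 256 KB, and labels 2 and 3 at size - 512 KB and
 * size - 256 KB, i.e. two copies at each end of the device.
 */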

/*
 * Given a file descriptor, read the label information and return an nvlist
 * describing the configuration, if there is one.
 */
int
zpool_read_label(int fd, nvlist_t **config)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t state, txg, size;

	*config = NULL;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = malloc(sizeof (vdev_label_t))) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pread64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t))
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			continue;
		}

		free(label);
		return (0);
	}

	free(label);
	*config = NULL;
	return (0);
}
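
/*
 * A minimal usage sketch for zpool_read_label() (hypothetical caller and
 * device path):
 *
 *	nvlist_t *config;
 *	int fd = open("/dev/da0", O_RDONLY);
 *
 *	if (fd >= 0 && zpool_read_label(fd, &config) == 0 &&
 *	    config != NULL) {
 *		... inspect ZPOOL_CONFIG_POOL_NAME, etc ...
 *		nvlist_free(config);
 *	}
 */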

typedef struct rdsk_node {
	char *rn_name;
	int rn_dfd;
	libzfs_handle_t *rn_hdl;
	nvlist_t *rn_config;
	avl_tree_t *rn_avl;
	avl_node_t rn_node;
	boolean_t rn_nozpool;
} rdsk_node_t;

static int
slice_cache_compare(const void *arg1, const void *arg2)
{
	const char  *nm1 = ((rdsk_node_t *)arg1)->rn_name;
	const char  *nm2 = ((rdsk_node_t *)arg2)->rn_name;
	char *nm1slice, *nm2slice;
	int rv;

	/*
	 * slices zero and two are the most likely to provide results,
	 * so put those first
	 */
	nm1slice = strstr(nm1, "s0");
	nm2slice = strstr(nm2, "s0");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}
	nm1slice = strstr(nm1, "s2");
	nm2slice = strstr(nm2, "s2");
	if (nm1slice && !nm2slice) {
		return (-1);
	}
	if (!nm1slice && nm2slice) {
		return (1);
	}

	rv = strcmp(nm1, nm2);
	if (rv == 0)
		return (0);
	return (rv > 0 ? 1 : -1);
}

#ifdef sun
static void
check_one_slice(avl_tree_t *r, char *diskname, uint_t partno,
    diskaddr_t size, uint_t blksz)
{
	rdsk_node_t tmpnode;
	rdsk_node_t *node;
	char sname[MAXNAMELEN];

	tmpnode.rn_name = &sname[0];
	(void) snprintf(tmpnode.rn_name, MAXNAMELEN, "%s%u",
	    diskname, partno);
	/*
	 * protect against division by zero for disk labels that
	 * contain a bogus sector size
	 */
	if (blksz == 0)
		blksz = DEV_BSIZE;
	/* too small to contain a zpool? */
	if ((size < (SPA_MINDEVSIZE / blksz)) &&
	    (node = avl_find(r, &tmpnode, NULL)))
		node->rn_nozpool = B_TRUE;
}
#endif	/* sun */

static void
nozpool_all_slices(avl_tree_t *r, const char *sname)
{
#ifdef sun
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if (((ptr = strrchr(diskname, 's')) == NULL) &&
	    ((ptr = strrchr(diskname, 'p')) == NULL))
		return;
	ptr[0] = 's';
	ptr[1] = '\0';
	for (i = 0; i < NDKMAP; i++)
		check_one_slice(r, diskname, i, 0, 1);
	ptr[0] = 'p';
	for (i = 0; i <= FD_NUMPART; i++)
		check_one_slice(r, diskname, i, 0, 1);
#endif	/* sun */
}

static void
check_slices(avl_tree_t *r, int fd, const char *sname)
{
#ifdef sun
	struct extvtoc vtoc;
	struct dk_gpt *gpt;
	char diskname[MAXNAMELEN];
	char *ptr;
	int i;

	(void) strncpy(diskname, sname, MAXNAMELEN);
	if ((ptr = strrchr(diskname, 's')) == NULL || !isdigit(ptr[1]))
		return;
	ptr[1] = '\0';

	if (read_extvtoc(fd, &vtoc) >= 0) {
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    vtoc.v_part[i].p_size, vtoc.v_sectorsz);
	} else if (efi_alloc_and_read(fd, &gpt) >= 0) {
		/*
		 * on x86 we'll still have leftover links that point
		 * to slices s[9-15], so use NDKMAP instead
		 */
		for (i = 0; i < NDKMAP; i++)
			check_one_slice(r, diskname, i,
			    gpt->efi_parts[i].p_size, gpt->efi_lbasize);
		/* nodes p[1-4] are never used with EFI labels */
		ptr[0] = 'p';
		for (i = 1; i <= FD_NUMPART; i++)
			check_one_slice(r, diskname, i, 0, 1);
		efi_free(gpt);
	}
#endif	/* sun */
}

static void
zpool_open_func(void *arg)
{
	rdsk_node_t *rn = arg;
	struct stat64 statbuf;
	nvlist_t *config;
	int fd;

	if (rn->rn_nozpool)
		return;
	if ((fd = openat64(rn->rn_dfd, rn->rn_name, O_RDONLY)) < 0) {
		/* symlink to a device that's no longer there */
		if (errno == ENOENT)
			nozpool_all_slices(rn->rn_avl, rn->rn_name);
		return;
	}
	/*
	 * Ignore failed stats.  We only want regular
	 * files, character devs and block devs.
	 */
	if (fstat64(fd, &statbuf) != 0 ||
	    (!S_ISREG(statbuf.st_mode) &&
	    !S_ISCHR(statbuf.st_mode) &&
	    !S_ISBLK(statbuf.st_mode))) {
		(void) close(fd);
		return;
	}
	/* this file is too small to hold a zpool */
	if (S_ISREG(statbuf.st_mode) &&
	    statbuf.st_size < SPA_MINDEVSIZE) {
		(void) close(fd);
		return;
	} else if (!S_ISREG(statbuf.st_mode)) {
		/*
		 * Try to read the disk label first so we don't have to
		 * open a bunch of minor nodes that can't have a zpool.
		 */
		check_slices(rn->rn_avl, fd, rn->rn_name);
	}

	if ((zpool_read_label(fd, &config)) != 0) {
		(void) close(fd);
		(void) no_memory(rn->rn_hdl);
		return;
	}
	(void) close(fd);

	rn->rn_config = config;
	if (config != NULL) {
		assert(rn->rn_nozpool == B_FALSE);
	}
}

/*
 * Given a file descriptor, clear (zero) the label information.  This function
 * is used in the appliance stack as part of the ZFS sysevent module and
 * to implement the "zpool labelclear" command.
 */
int
zpool_clear_label(int fd)
{
	struct stat64 statbuf;
	int l;
	vdev_label_t *label;
	uint64_t size;

	if (fstat64(fd, &statbuf) == -1)
		return (0);
	size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);

	if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
		return (-1);

	for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);	/* don't leak the buffer on error */
			return (-1);
		}
	}

	free(label);
	return (0);
}

/*
 * Given a list of directories to search, find all pools stored on disk.  This
 * includes partial pools which are not available to import.  If no args are
 * given (argc is 0), then the default directory (/dev) is searched.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
	int i, dirs = iarg->paths;
	DIR *dirp = NULL;
	struct dirent64 *dp;
	char path[MAXPATHLEN];
	char *end, **dir = iarg->path;
	size_t pathleft;
	nvlist_t *ret = NULL;
	static char *default_dir = "/dev";
	pool_list_t pools = { 0 };
	pool_entry_t *pe, *penext;
	vdev_entry_t *ve, *venext;
	config_entry_t *ce, *cenext;
	name_entry_t *ne, *nenext;
	avl_tree_t slice_cache;
	rdsk_node_t *slice;
	void *cookie;

	if (dirs == 0) {
		dirs = 1;
		dir = &default_dir;
	}

	/*
	 * Go through and read the label configuration information from every
	 * possible device, organizing the information according to pool GUID
	 * and toplevel GUID.
	 */
	for (i = 0; i < dirs; i++) {
		tpool_t *t;
		char *rdsk;
		int dfd;

		/* use realpath to normalize the path */
		if (realpath(dir[i], path) == 0) {
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
			goto error;
		}
		end = &path[strlen(path)];
		*end++ = '/';
		*end = 0;
		pathleft = &path[sizeof (path)] - end;

		/*
		 * Using raw devices instead of block devices when we're
		 * reading the labels skips a bunch of slow operations during
		 * close(2) processing, so we replace /dev/dsk with /dev.
		 */
		if (strcmp(path, "/dev/dsk/") == 0)
			rdsk = "/dev/";
		else
			rdsk = path;

		if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
		    (dirp = fdopendir(dfd)) == NULL) {
			zfs_error_aux(hdl, strerror(errno));
			(void) zfs_error_fmt(hdl, EZFS_BADPATH,
			    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
			    rdsk);
			goto error;
		}

		avl_create(&slice_cache, slice_cache_compare,
		    sizeof (rdsk_node_t), offsetof(rdsk_node_t, rn_node));

		if (strcmp(rdsk, "/dev/") == 0) {
			struct gmesh mesh;
			struct gclass *mp;
			struct ggeom *gp;
			struct gprovider *pp;

			errno = geom_gettree(&mesh);
			if (errno != 0) {
				zfs_error_aux(hdl, strerror(errno));
				(void) zfs_error_fmt(hdl, EZFS_BADPATH,
				    dgettext(TEXT_DOMAIN,
				    "cannot get GEOM tree"));
				goto error;
			}

			LIST_FOREACH(mp, &mesh.lg_class, lg_class) {
				LIST_FOREACH(gp, &mp->lg_geom, lg_geom) {
					LIST_FOREACH(pp,
					    &gp->lg_provider, lg_provider) {
						slice = zfs_alloc(hdl,
						    sizeof (rdsk_node_t));
						slice->rn_name = zfs_strdup(hdl,
						    pp->lg_name);
						slice->rn_avl = &slice_cache;
						slice->rn_dfd = dfd;
						slice->rn_hdl = hdl;
						slice->rn_nozpool = B_FALSE;
						avl_add(&slice_cache, slice);
					}
				}
			}

			geom_deletetree(&mesh);
			goto skipdir;
		}

		/*
		 * This is not MT-safe, but we have no MT consumers of libzfs
		 */
		while ((dp = readdir64(dirp)) != NULL) {
			const char *name = dp->d_name;
			if (name[0] == '.' &&
			    (name[1] == 0 || (name[1] == '.' && name[2] == 0)))
				continue;

			slice = zfs_alloc(hdl, sizeof (rdsk_node_t));
			slice->rn_name = zfs_strdup(hdl, name);
			slice->rn_avl = &slice_cache;
			slice->rn_dfd = dfd;
			slice->rn_hdl = hdl;
			slice->rn_nozpool = B_FALSE;
			avl_add(&slice_cache, slice);
		}
skipdir:
		/*
		 * create a thread pool to do all of this in parallel;
		 * rn_nozpool is not protected, so this is racy in that
		 * multiple tasks could decide that the same slice cannot
		 * hold a zpool, which is benign.  Also choose
		 * double the number of processors; we hold a lot of
		 * locks in the kernel, so going beyond this doesn't
		 * buy us much.
		 */
		t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN),
		    0, NULL);
		for (slice = avl_first(&slice_cache); slice;
		    (slice = avl_walk(&slice_cache, slice,
		    AVL_AFTER)))
			(void) tpool_dispatch(t, zpool_open_func, slice);
		tpool_wait(t);
		tpool_destroy(t);

		cookie = NULL;
		while ((slice = avl_destroy_nodes(&slice_cache,
		    &cookie)) != NULL) {
			if (slice->rn_config != NULL) {
				nvlist_t *config = slice->rn_config;
				boolean_t matched = B_TRUE;

				if (iarg->poolname != NULL) {
					char *pname;

					matched = nvlist_lookup_string(config,
					    ZPOOL_CONFIG_POOL_NAME,
					    &pname) == 0 &&
					    strcmp(iarg->poolname, pname) == 0;
				} else if (iarg->guid != 0) {
					uint64_t this_guid;

					matched = nvlist_lookup_uint64(config,
					    ZPOOL_CONFIG_POOL_GUID,
					    &this_guid) == 0 &&
					    iarg->guid == this_guid;
				}
				if (!matched) {
					nvlist_free(config);
					config = NULL;
					continue;
				}
				/* use the non-raw path for the config */
				(void) strlcpy(end, slice->rn_name, pathleft);
				if (add_config(hdl, &pools, path, config) != 0)
					goto error;
			}
			free(slice->rn_name);
			free(slice);
		}
		avl_destroy(&slice_cache);

		(void) closedir(dirp);
		dirp = NULL;
	}

	ret = get_configs(hdl, &pools, iarg->can_be_active);

error:
	for (pe = pools.pools; pe != NULL; pe = penext) {
		penext = pe->pe_next;
		for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
			venext = ve->ve_next;
			for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
				cenext = ce->ce_next;
				if (ce->ce_config)
					nvlist_free(ce->ce_config);
				free(ce);
			}
			free(ve);
		}
		free(pe);
	}

	for (ne = pools.names; ne != NULL; ne = nenext) {
		nenext = ne->ne_next;
		if (ne->ne_name)
			free(ne->ne_name);
		free(ne);
	}

	if (dirp)
		(void) closedir(dirp);

	return (ret);
}

nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
	importargs_t iarg = { 0 };

	iarg.paths = argc;
	iarg.path = argv;

	return (zpool_find_import_impl(hdl, &iarg));
}

/*
 * Given a cache file, return the contents as a list of importable pools.
 * poolname or guid (but not both) are provided by the caller when trying
 * to import a specific pool.
 */
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
    char *poolname, uint64_t guid)
{
	char *buf;
	int fd;
	struct stat64 statbuf;
	nvlist_t *raw, *src, *dst;
	nvlist_t *pools;
	nvpair_t *elem;
	char *name;
	uint64_t this_guid;
	boolean_t active;

	verify(poolname == NULL || guid == 0);

	if ((fd = open(cachefile, O_RDONLY)) < 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to open cache file"));
		return (NULL);
	}

	if (fstat64(fd, &statbuf) != 0) {
		zfs_error_aux(hdl, "%s", strerror(errno));
		(void) close(fd);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
		return (NULL);
	}

	if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
		(void) close(fd);
		return (NULL);
	}

	if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
		(void) close(fd);
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "failed to read cache file contents"));
		return (NULL);
	}

	(void) close(fd);

	if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
		free(buf);
		(void) zfs_error(hdl, EZFS_BADCACHE,
		    dgettext(TEXT_DOMAIN,
		    "invalid or corrupt cache file contents"));
		return (NULL);
	}

	free(buf);

	/*
	 * Go through and get the current state of the pools and refresh their
	 * state.
	 */
	if (nvlist_alloc(&pools, 0, 0) != 0) {
		(void) no_memory(hdl);
		nvlist_free(raw);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
		verify(nvpair_value_nvlist(elem, &src) == 0);

		verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
		    &name) == 0);
		if (poolname != NULL && strcmp(poolname, name) != 0)
			continue;

		verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
		    &this_guid) == 0);
		if (guid != 0) {
			verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
			    &this_guid) == 0);
			if (guid != this_guid)
				continue;
		}

		if (pool_active(hdl, name, this_guid, &active) != 0) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (active)
			continue;

		if ((dst = refresh_config(hdl, src)) == NULL) {
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}

		if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
			(void) no_memory(hdl);
			nvlist_free(dst);
			nvlist_free(raw);
			nvlist_free(pools);
			return (NULL);
		}
		nvlist_free(dst);
	}

	nvlist_free(raw);
	return (pools);
}

static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
	importargs_t *import = data;
	int found = 0;

	if (import->poolname != NULL) {
		char *pool_name;

		verify(nvlist_lookup_string(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
		if (strcmp(pool_name, import->poolname) == 0)
			found = 1;
	} else {
		uint64_t pool_guid;

		verify(nvlist_lookup_uint64(zhp->zpool_config,
		    ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
		if (pool_guid == import->guid)
			found = 1;
	}

	zpool_close(zhp);
	return (found);
}

nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
	verify(import->poolname == NULL || import->guid == 0);

	if (import->unique)
		import->exists = zpool_iter(hdl, name_or_guid_exists, import);

	if (import->cachefile != NULL)
		return (zpool_find_import_cached(hdl, import->cachefile,
		    import->poolname, import->guid));

	return (zpool_find_import_impl(hdl, import));
}
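
/*
 * A sketch of a typical zpool_search_import() call (hypothetical values);
 * exactly one of poolname and guid may be set, per the verify() above:
 *
 *	importargs_t args = { 0 };
 *	char *dirs[] = { "/dev" };
 *
 *	args.paths = 1;
 *	args.path = dirs;
 *	args.poolname = "tank";
 *	pools = zpool_search_import(hdl, &args);
 */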
1503219089Spjd
1504168404Spjdboolean_t
1505168404Spjdfind_guid(nvlist_t *nv, uint64_t guid)
1506168404Spjd{
1507168404Spjd	uint64_t tmp;
1508168404Spjd	nvlist_t **child;
1509168404Spjd	uint_t c, children;
1510168404Spjd
1511168404Spjd	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
1512168404Spjd	if (tmp == guid)
1513168404Spjd		return (B_TRUE);
1514168404Spjd
1515168404Spjd	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1516168404Spjd	    &child, &children) == 0) {
1517168404Spjd		for (c = 0; c < children; c++)
1518168404Spjd			if (find_guid(child[c], guid))
1519168404Spjd				return (B_TRUE);
1520168404Spjd	}
1521168404Spjd
1522168404Spjd	return (B_FALSE);
1523168404Spjd}
1524168404Spjd
1525185029Spjdtypedef struct aux_cbdata {
1526185029Spjd	const char	*cb_type;
1527168404Spjd	uint64_t	cb_guid;
1528168404Spjd	zpool_handle_t	*cb_zhp;
1529185029Spjd} aux_cbdata_t;
1530168404Spjd
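/*
 * zpool_iter() callback: scan one pool's aux vdev array (spares or
 * l2cache, selected by cb_type) for a vdev with the guid in cb_guid.
 * On a match, the pool handle is retained in cb_zhp and iteration
 * stops; otherwise the handle is closed.
 */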
1531168404Spjdstatic int
1532185029Spjdfind_aux(zpool_handle_t *zhp, void *data)
1533168404Spjd{
1534185029Spjd	aux_cbdata_t *cbp = data;
1535185029Spjd	nvlist_t **list;
1536185029Spjd	uint_t i, count;
1537168404Spjd	uint64_t guid;
1538168404Spjd	nvlist_t *nvroot;
1539168404Spjd
1540168404Spjd	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
1541168404Spjd	    &nvroot) == 0);
1542168404Spjd
1543185029Spjd	if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
1544185029Spjd	    &list, &count) == 0) {
1545185029Spjd		for (i = 0; i < count; i++) {
1546185029Spjd			verify(nvlist_lookup_uint64(list[i],
1547168404Spjd			    ZPOOL_CONFIG_GUID, &guid) == 0);
1548168404Spjd			if (guid == cbp->cb_guid) {
1549168404Spjd				cbp->cb_zhp = zhp;
1550168404Spjd				return (1);
1551168404Spjd			}
1552168404Spjd		}
1553168404Spjd	}
1554168404Spjd
1555168404Spjd	zpool_close(zhp);
1556168404Spjd	return (0);
1557168404Spjd}
1558168404Spjd
1559168404Spjd/*
1560168404Spjd * Determines if the pool is in use.  If so, it returns true and supplies
1561168404Spjd * the state of the pool as well as the name of the pool.  The name string
1562168404Spjd * is allocated and must be freed by the caller.
1563168404Spjd */
1564168404Spjdint
1565168404Spjdzpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
1566168404Spjd    boolean_t *inuse)
1567168404Spjd{
1568168404Spjd	nvlist_t *config;
1569168404Spjd	char *name;
1570168404Spjd	boolean_t ret;
1571168404Spjd	uint64_t guid, vdev_guid;
1572168404Spjd	zpool_handle_t *zhp;
1573168404Spjd	nvlist_t *pool_config;
1574168404Spjd	uint64_t stateval, isspare;
1575185029Spjd	aux_cbdata_t cb = { 0 };
1576168404Spjd	boolean_t isactive;
1577168404Spjd
1578168404Spjd	*inuse = B_FALSE;
1579168404Spjd
1580168404Spjd	if (zpool_read_label(fd, &config) != 0) {
1581168404Spjd		(void) no_memory(hdl);
1582168404Spjd		return (-1);
1583168404Spjd	}
1584168404Spjd
1585168404Spjd	if (config == NULL)
1586168404Spjd		return (0);
1587168404Spjd
1588168404Spjd	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
1589168404Spjd	    &stateval) == 0);
1590168404Spjd	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
1591168404Spjd	    &vdev_guid) == 0);
1592168404Spjd
1593185029Spjd	if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
1594168404Spjd		verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1595168404Spjd		    &name) == 0);
1596168404Spjd		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1597168404Spjd		    &guid) == 0);
1598168404Spjd	}
1599168404Spjd
1600168404Spjd	switch (stateval) {
1601168404Spjd	case POOL_STATE_EXPORTED:
1602219089Spjd		/*
1603219089Spjd		 * A pool with an exported state may in fact be imported
1604219089Spjd		 * read-only, so check the in-core state to see if it's
1605219089Spjd		 * active and imported read-only.  If it is, set
1606219089Spjd		 * its state to active.
1607219089Spjd		 */
1608219089Spjd		if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
1609219089Spjd		    (zhp = zpool_open_canfail(hdl, name)) != NULL &&
1610219089Spjd		    zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
1611219089Spjd			stateval = POOL_STATE_ACTIVE;
1612219089Spjd
1613168404Spjd		ret = B_TRUE;
1614168404Spjd		break;
1615168404Spjd
1616168404Spjd	case POOL_STATE_ACTIVE:
1617168404Spjd		/*
1618168404Spjd		 * For an active pool, we have to determine if it's really part
1619168404Spjd		 * of a currently active pool (in which case the pool will exist
1620168404Spjd		 * and the guid will be the same), or whether it's part of an
1621168404Spjd		 * active pool that was disconnected without being explicitly
1622168404Spjd		 * exported.
1623168404Spjd		 */
1624168404Spjd		if (pool_active(hdl, name, guid, &isactive) != 0) {
1625168404Spjd			nvlist_free(config);
1626168404Spjd			return (-1);
1627168404Spjd		}
1628168404Spjd
1629168404Spjd		if (isactive) {
1630168404Spjd			/*
1631168404Spjd			 * Because the device may have been removed while
1632168404Spjd			 * offlined, we only report it as active if the vdev is
1633168404Spjd			 * still present in the config.  Otherwise, pretend like
1634168404Spjd			 * it's not in use.
1635168404Spjd			 */
1636168404Spjd			if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
1637168404Spjd			    (pool_config = zpool_get_config(zhp, NULL))
1638168404Spjd			    != NULL) {
1639168404Spjd				nvlist_t *nvroot;
1640168404Spjd
1641168404Spjd				verify(nvlist_lookup_nvlist(pool_config,
1642168404Spjd				    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1643168404Spjd				ret = find_guid(nvroot, vdev_guid);
1644168404Spjd			} else {
1645168404Spjd				ret = B_FALSE;
1646168404Spjd			}
1647168404Spjd
1648168404Spjd			/*
1649168404Spjd			 * If this is an active spare within another pool, we
1650168404Spjd			 * treat it like an unused hot spare.  This allows the
1651168404Spjd			 * user to create a pool with a hot spare that is
1652168404Spjd			 * currently in use within another pool.  Since we return
1653168404Spjd			 * B_TRUE, libdiskmgt will continue to prevent generic
1654168404Spjd			 * consumers from using the device.
1655168404Spjd			 */
1656168404Spjd			if (ret && nvlist_lookup_uint64(config,
1657168404Spjd			    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
1658168404Spjd				stateval = POOL_STATE_SPARE;
1659168404Spjd
1660168404Spjd			if (zhp != NULL)
1661168404Spjd				zpool_close(zhp);
1662168404Spjd		} else {
1663168404Spjd			stateval = POOL_STATE_POTENTIALLY_ACTIVE;
1664168404Spjd			ret = B_TRUE;
1665168404Spjd		}
1666168404Spjd		break;
1667168404Spjd
1668168404Spjd	case POOL_STATE_SPARE:
1669168404Spjd		/*
1670168404Spjd		 * For a hot spare, it can be either definitively in use, or
1671168404Spjd		 * potentially active.  To determine if it's in use, we iterate
1672168404Spjd		 * over all pools in the system and search for one with a spare
1673168404Spjd		 * with a matching guid.
1674168404Spjd		 *
1675168404Spjd		 * Due to the shared nature of spares, we don't actually report
1676168404Spjd		 * the potentially active case as in use.  This means the user
1677168404Spjd		 * can freely create pools on the hot spares of exported pools,
1678168404Spjd		 * but handling it otherwise would complicate the code, and
1679168404Spjd		 * we would end up having to deal with this case anyway.
1680168404Spjd		 */
1681168404Spjd		cb.cb_zhp = NULL;
1682168404Spjd		cb.cb_guid = vdev_guid;
1683185029Spjd		cb.cb_type = ZPOOL_CONFIG_SPARES;
1684185029Spjd		if (zpool_iter(hdl, find_aux, &cb) == 1) {
1685168404Spjd			name = (char *)zpool_get_name(cb.cb_zhp);
1686168404Spjd			ret = B_TRUE;
1687168404Spjd		} else {
1688168404Spjd			ret = B_FALSE;
1689168404Spjd		}
1690168404Spjd		break;
1691168404Spjd
1692185029Spjd	case POOL_STATE_L2CACHE:
1694185029Spjd		/*
1695185029Spjd		 * Check if any pool is currently using this l2cache device.
1696185029Spjd		 */
1697185029Spjd		cb.cb_zhp = NULL;
1698185029Spjd		cb.cb_guid = vdev_guid;
1699185029Spjd		cb.cb_type = ZPOOL_CONFIG_L2CACHE;
1700185029Spjd		if (zpool_iter(hdl, find_aux, &cb) == 1) {
1701185029Spjd			name = (char *)zpool_get_name(cb.cb_zhp);
1702185029Spjd			ret = B_TRUE;
1703185029Spjd		} else {
1704185029Spjd			ret = B_FALSE;
1705185029Spjd		}
1706185029Spjd		break;
1707185029Spjd
1708168404Spjd	default:
1709168404Spjd		ret = B_FALSE;
1710168404Spjd	}
1711168404Spjd
1713168404Spjd	if (ret) {
1714168404Spjd		if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
1715185029Spjd			if (cb.cb_zhp)
1716185029Spjd				zpool_close(cb.cb_zhp);
1717168404Spjd			nvlist_free(config);
1718168404Spjd			return (-1);
1719168404Spjd		}
1720168404Spjd		*state = (pool_state_t)stateval;
1721168404Spjd	}
1722168404Spjd
1723168404Spjd	if (cb.cb_zhp)
1724168404Spjd		zpool_close(cb.cb_zhp);
1725168404Spjd
1726168404Spjd	nvlist_free(config);
1727168404Spjd	*inuse = ret;
1728168404Spjd	return (0);
1729168404Spjd}
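
/*
 * A minimal usage sketch (illustrative only, not part of the original
 * source; the helper name is this example's own): ask whether the
 * device at "path" carries an in-use ZFS label.  The name returned by
 * zpool_in_use() is only set when the device is in use, and is freed
 * here since this example does not report it.
 */
static boolean_t
example_device_in_use(libzfs_handle_t *hdl, const char *path)
{
	pool_state_t state;
	char *name = NULL;
	boolean_t inuse = B_FALSE;
	int fd;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (B_FALSE);

	if (zpool_in_use(hdl, fd, &state, &name, &inuse) != 0)
		inuse = B_FALSE;

	if (inuse)
		free(name);
	(void) close(fd);
	return (inuse);
}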