libzfs_pool.c revision 277905
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 *   zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}
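
/*
 * Example (sketch): reading a numeric pool property and its source with
 * zpool_get_prop_int().  The example_* helper below is illustrative only
 * and assumes 'zhp' came from zpool_open().
 */
void
example_print_pool_version(zpool_handle_t *zhp)
{
	zprop_source_t src;
	uint64_t version;

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, &src);
	(void) printf("version=%llu (%s)\n", (u_longlong_t)version,
	    src == ZPROP_SRC_DEFAULT ? "default" : "local");
}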

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf,
			    zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu", intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu", intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
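
/*
 * Example (sketch): fetching the formatted value of a pool property into a
 * caller-supplied buffer.  Passing B_FALSE for 'literal' asks for the
 * human-readable form (e.g. "3.62T" instead of a raw byte count).  The
 * example_* helper is illustrative only.
 */
void
example_print_pool_health(zpool_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];

	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
	    NULL, B_FALSE) == 0)
		(void) printf("%s: %s\n", zpool_get_name(zhp), buf);
}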

/*
 * Check that the bootfs name names a dataset within the given pool.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}
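
/*
 * For example, with pool "tank", bootfs_name_valid() accepts "tank" and
 * "tank/boot" but rejects "tankx/boot" and "other/fs": the bootfs value
 * must begin with the pool name, followed by '/' or the end of the string.
 */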

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
#ifdef sun
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
#endif	/* sun */
	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %d is invalid."),
				    propname, intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * bootfs property value has to be a dataset name and
			 * the dataset has to be in the pool the property is
			 * set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#ifdef sun
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif	/* sun */
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property: propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
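
/*
 * Example (sketch): setting a pool property from its textual name/value
 * pair.  zpool_set_prop() runs the pair through zpool_valid_proplist()
 * before issuing the ioctl.  The example_* helper is illustrative only.
 */
int
example_set_pool_comment(zpool_handle_t *zhp)
{
	/* Equivalent to "zpool set comment='scratch pool' <pool>". */
	return (zpool_set_prop(zhp, "comment", "scratch pool"));
}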

int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
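
/*
 * Example (sketch): querying a feature state.  For "feature@..." properties
 * the buffer receives "disabled", "enabled" or "active"; for
 * "unsupported@..." properties it receives "inactive" or "readonly".  The
 * example_* helper is illustrative only.
 */
void
example_print_feature_state(zpool_handle_t *zhp)
{
	char buf[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy", buf,
	    sizeof (buf)) == 0)
		(void) printf("feature@async_destroy=%s\n", buf);
}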

/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally recording an extended error
 * message in 'hdl'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid.  So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}
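
/*
 * For example, "tank" and "rz2pool" pass zpool_name_valid(), while
 * "mirror", "raidz1", "spare0" and "log" are rejected at create/import
 * time as reserved, and names such as "c0t0d0" (disk-like) or "1pool"
 * (no leading letter) are rejected by pool_namecheck().
 */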

/*
 * Open a handle to the given pool, even if the pool is currently in the FAULTED
 * state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error.  Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}
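
/*
 * Example (sketch): the usual open/use/close pattern.  zpool_open() fails
 * for FAULTED pools; callers that must inspect such pools use
 * zpool_open_canfail() instead.  The example_* helper and the pool name
 * are illustrative only.
 */
void
example_show_state(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return;
	(void) printf("%s: %s\n", zpool_get_name(zhp),
	    zpool_pool_state_to_name(zpool_get_state(zhp)));
	zpool_close(zhp);
}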

/*
 * Close the handle.  Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}

/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
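
/*
 * Example (sketch): the shape of the vdev nvlist a caller hands to
 * zpool_create() for a single-disk pool.  Real consumers build this via
 * make_root_vdev() in zpool(1M), which also handles labeling and the
 * whole-disk case; this hand-rolled version is illustrative only.
 */
int
example_create_single_disk_pool(libzfs_handle_t *hdl)
{
	nvlist_t *nvroot, *disk;
	nvlist_t *children[1];
	int ret;

	verify(nvlist_alloc(&disk, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_DISK) == 0);
	verify(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
	    "/dev/dsk/c0t1d0s0") == 0);

	verify(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) == 0);
	verify(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
	    VDEV_TYPE_ROOT) == 0);
	children[0] = disk;
	verify(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    children, 1) == 0);

	ret = zpool_create(hdl, "tank", nvroot, NULL, NULL);
	nvlist_free(disk);
	nvlist_free(nvroot);
	return (ret);
}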

/*
 * Destroy the given pool.  It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}

/*
 * Add the given vdevs to the pool.  The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system.  The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}

static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem.  "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly.  "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly.  "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain.  "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain.  "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'.  "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}

/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL) {
			return (-1);
		} else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		nvlist_free(props);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		nvlist_free(props);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	nvlist_free(props);

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
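
/*
 * Example (sketch): starting and cancelling a scrub; this is roughly what
 * the zpool(1M) scrub subcommand does.  The example_* helpers are
 * illustrative only.
 */
int
example_start_scrub(zpool_handle_t *zhp)
{
	return (zpool_scan(zhp, POOL_SCAN_SCRUB));	/* zpool scrub */
}

int
example_stop_scrub(zpool_handle_t *zhp)
{
	return (zpool_scan(zhp, POOL_SCAN_NONE));	/* zpool scrub -s */
}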

/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string.  Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)  (str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}
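
/*
 * For example, "c0t0d0s0", "/dev/dsk/c1t2d3s4" and "/dev/dsk/c1t2d3s4/old"
 * all satisfy ctd_check_path(), while "/dev/rdsk/foo" and "md0" do not.
 */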
1950
1951/*
1952 * Find a vdev that matches the search criteria specified. We use the
1953 * the nvpair name to determine how we should look for the device.
1954 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1955 * spare; but FALSE if its an INUSE spare.
1956 */
1957static nvlist_t *
1958vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1959    boolean_t *l2cache, boolean_t *log)
1960{
1961	uint_t c, children;
1962	nvlist_t **child;
1963	nvlist_t *ret;
1964	uint64_t is_log;
1965	char *srchkey;
1966	nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1967
1968	/* Nothing to look for */
1969	if (search == NULL || pair == NULL)
1970		return (NULL);
1971
1972	/* Obtain the key we will use to search */
1973	srchkey = nvpair_name(pair);
1974
1975	switch (nvpair_type(pair)) {
1976	case DATA_TYPE_UINT64:
1977		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1978			uint64_t srchval, theguid;
1979
1980			verify(nvpair_value_uint64(pair, &srchval) == 0);
1981			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1982			    &theguid) == 0);
1983			if (theguid == srchval)
1984				return (nv);
1985		}
1986		break;
1987
1988	case DATA_TYPE_STRING: {
1989		char *srchval, *val;
1990
1991		verify(nvpair_value_string(pair, &srchval) == 0);
1992		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1993			break;
1994
1995		/*
1996		 * Search for the requested value. Special cases:
1997		 *
1998		 * - ZPOOL_CONFIG_PATH for whole disk entries.  These end in
1999		 *   "s0" or "s0/old".  The "s0" part is hidden from the user,
2000		 *   but included in the string, so this matches around it.
2001		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
2002		 *
2003		 * Otherwise, all other searches are simple string compares.
2004		 */
2005		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
2006		    ctd_check_path(val)) {
2007			uint64_t wholedisk = 0;
2008
2009			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
2010			    &wholedisk);
2011			if (wholedisk) {
2012				int slen = strlen(srchval);
2013				int vlen = strlen(val);
2014
2015				if (slen != vlen - 2)
2016					break;
2017
2018				/*
2019				 * make_leaf_vdev() should only set
2020				 * wholedisk for ZPOOL_CONFIG_PATHs which
2021				 * will include "/dev/dsk/", giving plenty of
2022				 * room for the indices used next.
2023				 */
2024				ASSERT(vlen >= 6);
2025
2026				/*
2027				 * strings identical except trailing "s0"
2028				 */
2029				if (strcmp(&val[vlen - 2], "s0") == 0 &&
2030				    strncmp(srchval, val, slen) == 0)
2031					return (nv);
2032
2033				/*
2034				 * strings identical except trailing "s0/old"
2035				 */
2036				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
2037				    strcmp(&srchval[slen - 4], "/old") == 0 &&
2038				    strncmp(srchval, val, slen - 4) == 0)
2039					return (nv);
2040
2041				break;
2042			}
2043		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2044			char *type, *idx, *end, *p;
2045			uint64_t id, vdev_id;
2046
2047			/*
2048			 * Determine our vdev type, keeping in mind
2049			 * that the srchval is composed of a type and
2050			 * vdev id pair (i.e. mirror-4).
2051			 */
2052			if ((type = strdup(srchval)) == NULL)
2053				return (NULL);
2054
2055			if ((p = strrchr(type, '-')) == NULL) {
2056				free(type);
2057				break;
2058			}
2059			idx = p + 1;
2060			*p = '\0';
2061
2062			/*
2063			 * If the types don't match then keep looking.
2064			 */
2065			if (strncmp(val, type, strlen(val)) != 0) {
2066				free(type);
2067				break;
2068			}
2069
2070			verify(strncmp(type, VDEV_TYPE_RAIDZ,
2071			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2072			    strncmp(type, VDEV_TYPE_MIRROR,
2073			    strlen(VDEV_TYPE_MIRROR)) == 0);
2074			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2075			    &id) == 0);
2076
2077			errno = 0;
2078			vdev_id = strtoull(idx, &end, 10);
2079
2080			free(type);
2081			if (errno != 0)
2082				return (NULL);
2083
2084			/*
2085			 * Now verify that we have the correct vdev id.
2086			 */
2087			if (vdev_id == id)
2088				return (nv);
2089		}
2090
2091		/*
2092		 * Common case
2093		 */
2094		if (strcmp(srchval, val) == 0)
2095			return (nv);
2096		break;
2097	}
2098
2099	default:
2100		break;
2101	}
2102
2103	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2104	    &child, &children) != 0)
2105		return (NULL);
2106
2107	for (c = 0; c < children; c++) {
2108		if ((ret = vdev_to_nvlist_iter(child[c], search,
2109		    avail_spare, l2cache, NULL)) != NULL) {
2110			/*
			 * The 'is_log' value is only set for the top-level
			 * vdev, not the leaf vdevs.  So we always look up the
2113			 * log device from the root of the vdev tree (where
2114			 * 'log' is non-NULL).
2115			 */
2116			if (log != NULL &&
2117			    nvlist_lookup_uint64(child[c],
2118			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2119			    is_log) {
2120				*log = B_TRUE;
2121			}
2122			return (ret);
2123		}
2124	}
2125
2126	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2127	    &child, &children) == 0) {
2128		for (c = 0; c < children; c++) {
2129			if ((ret = vdev_to_nvlist_iter(child[c], search,
2130			    avail_spare, l2cache, NULL)) != NULL) {
2131				*avail_spare = B_TRUE;
2132				return (ret);
2133			}
2134		}
2135	}
2136
2137	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2138	    &child, &children) == 0) {
2139		for (c = 0; c < children; c++) {
2140			if ((ret = vdev_to_nvlist_iter(child[c], search,
2141			    avail_spare, l2cache, NULL)) != NULL) {
2142				*l2cache = B_TRUE;
2143				return (ret);
2144			}
2145		}
2146	}
2147
2148	return (NULL);
2149}
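
/*
 * For example, searching with a ZPOOL_CONFIG_PATH of "/dev/dsk/c0t0d0"
 * matches a whole-disk entry whose stored path is "/dev/dsk/c0t0d0s0"
 * via the "s0" special case above, and searching with a
 * ZPOOL_CONFIG_TYPE of "mirror-4" matches the mirror whose
 * ZPOOL_CONFIG_ID is 4.
 */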
2150
2151/*
2152 * Given a physical path (minus the "/devices" prefix), find the
2153 * associated vdev.
2154 */
2155nvlist_t *
2156zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2157    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2158{
2159	nvlist_t *search, *nvroot, *ret;
2160
2161	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2162	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2163
2164	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2165	    &nvroot) == 0);
2166
2167	*avail_spare = B_FALSE;
2168	*l2cache = B_FALSE;
2169	if (log != NULL)
2170		*log = B_FALSE;
2171	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2172	nvlist_free(search);
2173
2174	return (ret);
2175}
2176
2177/*
 * Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
2179 */
2180boolean_t
2181zpool_vdev_is_interior(const char *name)
2182{
2183	if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2184	    strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2185		return (B_TRUE);
2186	return (B_FALSE);
2187}
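
/*
 * For example, "mirror-2", "raidz1-0" and "raidz3-5" are interior vdev
 * names, while "c0t0d0" and "/dev/dsk/c0t0d0s0" are not.
 */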
2188
2189nvlist_t *
2190zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2191    boolean_t *l2cache, boolean_t *log)
2192{
2193	char buf[MAXPATHLEN];
2194	char *end;
2195	nvlist_t *nvroot, *search, *ret;
2196	uint64_t guid;
2197
2198	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2199
2200	guid = strtoull(path, &end, 10);
2201	if (guid != 0 && *end == '\0') {
2202		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2203	} else if (zpool_vdev_is_interior(path)) {
2204		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2205	} else if (path[0] != '/') {
2206		(void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path);
2207		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2208	} else {
2209		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2210	}
2211
2212	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2213	    &nvroot) == 0);
2214
2215	*avail_spare = B_FALSE;
2216	*l2cache = B_FALSE;
2217	if (log != NULL)
2218		*log = B_FALSE;
2219	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2220	nvlist_free(search);
2221
2222	return (ret);
2223}
2224
2225static int
2226vdev_online(nvlist_t *nv)
2227{
2228	uint64_t ival;
2229
2230	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2231	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2232	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2233		return (0);
2234
2235	return (1);
2236}
2237
2238/*
 * Helper function for zpool_get_physpath().
2240 */
2241static int
2242vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2243    size_t *bytes_written)
2244{
2245	size_t bytes_left, pos, rsz;
2246	char *tmppath;
2247	const char *format;
2248
2249	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2250	    &tmppath) != 0)
2251		return (EZFS_NODEVICE);
2252
2253	pos = *bytes_written;
2254	bytes_left = physpath_size - pos;
2255	format = (pos == 0) ? "%s" : " %s";
2256
2257	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2258	*bytes_written += rsz;
2259
2260	if (rsz >= bytes_left) {
2261		/* if physpath was not copied properly, clear it */
2262		if (bytes_left != 0) {
2263			physpath[pos] = 0;
2264		}
2265		return (EZFS_NOSPC);
2266	}
2267	return (0);
2268}
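
/*
 * For example, after two successful calls the caller's buffer might hold
 * "/pci@0,0/ide@1f,1/disk@0,0:a /pci@0,0/ide@1f,1/disk@1,0:a"; the
 * individual physpaths are space-separated (the device paths shown are
 * hypothetical).
 */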
2269
2270static int
2271vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2272    size_t *rsz, boolean_t is_spare)
2273{
2274	char *type;
2275	int ret;
2276
2277	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2278		return (EZFS_INVALCONFIG);
2279
2280	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2281		/*
2282		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2283		 * For a spare vdev, we only want to boot from the active
2284		 * spare device.
2285		 */
2286		if (is_spare) {
2287			uint64_t spare = 0;
2288			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2289			    &spare);
2290			if (!spare)
2291				return (EZFS_INVALCONFIG);
2292		}
2293
2294		if (vdev_online(nv)) {
2295			if ((ret = vdev_get_one_physpath(nv, physpath,
2296			    phypath_size, rsz)) != 0)
2297				return (ret);
2298		}
2299	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2300	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2301	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2302		nvlist_t **child;
2303		uint_t count;
2304		int i, ret;
2305
2306		if (nvlist_lookup_nvlist_array(nv,
2307		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2308			return (EZFS_INVALCONFIG);
2309
2310		for (i = 0; i < count; i++) {
2311			ret = vdev_get_physpaths(child[i], physpath,
2312			    phypath_size, rsz, is_spare);
2313			if (ret == EZFS_NOSPC)
2314				return (ret);
2315		}
2316	}
2317
2318	return (EZFS_POOL_INVALARG);
2319}
2320
2321/*
2322 * Get phys_path for a root pool config.
2323 * Return 0 on success; non-zero on failure.
2324 */
2325static int
2326zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2327{
2328	size_t rsz;
2329	nvlist_t *vdev_root;
2330	nvlist_t **child;
2331	uint_t count;
2332	char *type;
2333
2334	rsz = 0;
2335
2336	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2337	    &vdev_root) != 0)
2338		return (EZFS_INVALCONFIG);
2339
2340	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2341	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2342	    &child, &count) != 0)
2343		return (EZFS_INVALCONFIG);
2344
2345	/*
	 * A root pool cannot have EFI labeled disks and can only have
2347	 * a single top-level vdev.
2348	 */
2349	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2350	    pool_uses_efi(vdev_root))
2351		return (EZFS_POOL_INVALARG);
2352
2353	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2354	    B_FALSE);
2355
2356	/* No online devices */
2357	if (rsz == 0)
2358		return (EZFS_NODEVICE);
2359
2360	return (0);
2361}
2362
2363/*
2364 * Get phys_path for a root pool
2365 * Return 0 on success; non-zero on failure.
2366 */
2367int
2368zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2369{
2370	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2371	    phypath_size));
2372}
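
/*
 * A minimal usage sketch, assuming 'zhp' is an open handle for a
 * bootable root pool:
 *
 *	char physpath[MAXPATHLEN];
 *
 *	if (zpool_get_physpath(zhp, physpath, sizeof (physpath)) == 0)
 *		(void) printf("%s\n", physpath);
 */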
2373
2374/*
 * If the device has been dynamically expanded then we need to relabel
2376 * the disk to use the new unallocated space.
2377 */
2378static int
2379zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2380{
2381#ifdef sun
2382	char path[MAXPATHLEN];
2383	char errbuf[1024];
2384	int fd, error;
2385	int (*_efi_use_whole_disk)(int);
2386
2387	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2388	    "efi_use_whole_disk")) == NULL)
2389		return (-1);
2390
2391	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2392
2393	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2394		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2395		    "relabel '%s': unable to open device"), name);
2396		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2397	}
2398
2399	/*
2400	 * It's possible that we might encounter an error if the device
2401	 * does not have any unallocated space left. If so, we simply
2402	 * ignore that error and continue on.
2403	 */
2404	error = _efi_use_whole_disk(fd);
2405	(void) close(fd);
2406	if (error && error != VT_ENOSPC) {
2407		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2408		    "relabel '%s': unable to read disk capacity"), name);
2409		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2410	}
2411#endif	/* sun */
2412	return (0);
2413}
2414
2415/*
 * Bring the specified vdev online.  The 'flags' parameter is a set of the
2417 * ZFS_ONLINE_* flags.
2418 */
2419int
2420zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2421    vdev_state_t *newstate)
2422{
2423	zfs_cmd_t zc = { 0 };
2424	char msg[1024];
2425	nvlist_t *tgt;
2426	boolean_t avail_spare, l2cache, islog;
2427	libzfs_handle_t *hdl = zhp->zpool_hdl;
2428
2429	if (flags & ZFS_ONLINE_EXPAND) {
2430		(void) snprintf(msg, sizeof (msg),
2431		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2432	} else {
2433		(void) snprintf(msg, sizeof (msg),
2434		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2435	}
2436
2437	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2438	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2439	    &islog)) == NULL)
2440		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2441
2442	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2443
2444	if (avail_spare)
2445		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2446
2447	if (flags & ZFS_ONLINE_EXPAND ||
2448	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2449		char *pathname = NULL;
2450		uint64_t wholedisk = 0;
2451
2452		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2453		    &wholedisk);
2454		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2455		    &pathname) == 0);
2456
2457		/*
2458		 * XXX - L2ARC 1.0 devices can't support expansion.
2459		 */
2460		if (l2cache) {
2461			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2462			    "cannot expand cache devices"));
2463			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2464		}
2465
2466		if (wholedisk) {
2467			pathname += strlen(DISK_ROOT) + 1;
2468			(void) zpool_relabel_disk(hdl, pathname);
2469		}
2470	}
2471
2472	zc.zc_cookie = VDEV_STATE_ONLINE;
2473	zc.zc_obj = flags;
2474
2475	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2476		if (errno == EINVAL) {
2477			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2478			    "from this pool into a new one.  Use '%s' "
2479			    "instead"), "zpool detach");
2480			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2481		}
2482		return (zpool_standard_error(hdl, errno, msg));
2483	}
2484
2485	*newstate = zc.zc_cookie;
2486	return (0);
2487}
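
/*
 * A minimal usage sketch (the device name is hypothetical):
 *
 *	vdev_state_t newstate;
 *
 *	if (zpool_vdev_online(zhp, "c0t0d0", ZFS_ONLINE_EXPAND,
 *	    &newstate) == 0 && newstate == VDEV_STATE_HEALTHY)
 *		...	device is online and expanded
 */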
2488
2489/*
2490 * Take the specified vdev offline
2491 */
2492int
2493zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2494{
2495	zfs_cmd_t zc = { 0 };
2496	char msg[1024];
2497	nvlist_t *tgt;
2498	boolean_t avail_spare, l2cache;
2499	libzfs_handle_t *hdl = zhp->zpool_hdl;
2500
2501	(void) snprintf(msg, sizeof (msg),
2502	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2503
2504	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2505	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2506	    NULL)) == NULL)
2507		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2508
2509	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2510
2511	if (avail_spare)
2512		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2513
2514	zc.zc_cookie = VDEV_STATE_OFFLINE;
2515	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2516
2517	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2518		return (0);
2519
2520	switch (errno) {
2521	case EBUSY:
2522
2523		/*
2524		 * There are no other replicas of this device.
2525		 */
2526		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2527
2528	case EEXIST:
2529		/*
2530		 * The log device has unplayed logs
2531		 */
2532		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2533
2534	default:
2535		return (zpool_standard_error(hdl, errno, msg));
2536	}
2537}
2538
2539/*
2540 * Mark the given vdev faulted.
2541 */
2542int
2543zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2544{
2545	zfs_cmd_t zc = { 0 };
2546	char msg[1024];
2547	libzfs_handle_t *hdl = zhp->zpool_hdl;
2548
2549	(void) snprintf(msg, sizeof (msg),
2550	    dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2551
2552	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2553	zc.zc_guid = guid;
2554	zc.zc_cookie = VDEV_STATE_FAULTED;
2555	zc.zc_obj = aux;
2556
2557	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2558		return (0);
2559
2560	switch (errno) {
2561	case EBUSY:
2562
2563		/*
2564		 * There are no other replicas of this device.
2565		 */
2566		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2567
2568	default:
2569		return (zpool_standard_error(hdl, errno, msg));
2570	}
2572}
2573
2574/*
2575 * Mark the given vdev degraded.
2576 */
2577int
2578zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2579{
2580	zfs_cmd_t zc = { 0 };
2581	char msg[1024];
2582	libzfs_handle_t *hdl = zhp->zpool_hdl;
2583
2584	(void) snprintf(msg, sizeof (msg),
2585	    dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2586
2587	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2588	zc.zc_guid = guid;
2589	zc.zc_cookie = VDEV_STATE_DEGRADED;
2590	zc.zc_obj = aux;
2591
2592	if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2593		return (0);
2594
2595	return (zpool_standard_error(hdl, errno, msg));
2596}
2597
2598/*
2599 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2600 * a hot spare.
2601 */
2602static boolean_t
2603is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2604{
2605	nvlist_t **child;
2606	uint_t c, children;
2607	char *type;
2608
2609	if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2610	    &children) == 0) {
2611		verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2612		    &type) == 0);
2613
2614		if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2615		    children == 2 && child[which] == tgt)
2616			return (B_TRUE);
2617
2618		for (c = 0; c < children; c++)
2619			if (is_replacing_spare(child[c], tgt, which))
2620				return (B_TRUE);
2621	}
2622
2623	return (B_FALSE);
2624}
2625
2626/*
2627 * Attach new_disk (fully described by nvroot) to old_disk.
2628 * If 'replacing' is specified, the new disk will replace the old one.
2629 */
2630int
2631zpool_vdev_attach(zpool_handle_t *zhp,
2632    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2633{
2634	zfs_cmd_t zc = { 0 };
2635	char msg[1024];
2636	int ret;
2637	nvlist_t *tgt;
2638	boolean_t avail_spare, l2cache, islog;
2639	uint64_t val;
2640	char *newname;
2641	nvlist_t **child;
2642	uint_t children;
2643	nvlist_t *config_root;
2644	libzfs_handle_t *hdl = zhp->zpool_hdl;
2645	boolean_t rootpool = zpool_is_bootable(zhp);
2646
2647	if (replacing)
2648		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2649		    "cannot replace %s with %s"), old_disk, new_disk);
2650	else
2651		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2652		    "cannot attach %s to %s"), new_disk, old_disk);
2653
2654	/*
2655	 * If this is a root pool, make sure that we're not attaching an
2656	 * EFI labeled device.
2657	 */
2658	if (rootpool && pool_uses_efi(nvroot)) {
2659		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2660		    "EFI labeled devices are not supported on root pools."));
2661		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2662	}
2663
2664	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2665	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == NULL)
2667		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2668
2669	if (avail_spare)
2670		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2671
2672	if (l2cache)
2673		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2674
2675	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2676	zc.zc_cookie = replacing;
2677
2678	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2679	    &child, &children) != 0 || children != 1) {
2680		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2681		    "new device must be a single disk"));
2682		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2683	}
2684
2685	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2686	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2687
2688	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2689		return (-1);
2690
2691	/*
2692	 * If the target is a hot spare that has been swapped in, we can only
2693	 * replace it with another hot spare.
2694	 */
2695	if (replacing &&
2696	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2697	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2698	    NULL) == NULL || !avail_spare) &&
2699	    is_replacing_spare(config_root, tgt, 1)) {
2700		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2701		    "can only be replaced by another hot spare"));
2702		free(newname);
2703		return (zfs_error(hdl, EZFS_BADTARGET, msg));
2704	}
2705
2706	free(newname);
2707
2708	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2709		return (-1);
2710
2711	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2712
2713	zcmd_free_nvlists(&zc);
2714
2715	if (ret == 0) {
2716		if (rootpool) {
2717			/*
2718			 * XXX need a better way to prevent user from
2719			 * booting up a half-baked vdev.
2720			 */
2721			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2722			    "sure to wait until resilver is done "
2723			    "before rebooting.\n"));
2724			(void) fprintf(stderr, "\n");
2725			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If "
2726			    "you boot from pool '%s', you may need to update\n"
2727			    "boot code on newly attached disk '%s'.\n\n"
2728			    "Assuming you use GPT partitioning and 'da0' is "
2729			    "your new boot disk\n"
2730			    "you may use the following command:\n\n"
2731			    "\tgpart bootcode -b /boot/pmbr -p "
2732			    "/boot/gptzfsboot -i 1 da0\n\n"),
2733			    zhp->zpool_name, new_disk);
2734		}
2735		return (0);
2736	}
2737
2738	switch (errno) {
2739	case ENOTSUP:
2740		/*
2741		 * Can't attach to or replace this type of vdev.
2742		 */
2743		if (replacing) {
2744			uint64_t version = zpool_get_prop_int(zhp,
2745			    ZPOOL_PROP_VERSION, NULL);
2746
2747			if (islog)
2748				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2749				    "cannot replace a log with a spare"));
2750			else if (version >= SPA_VERSION_MULTI_REPLACE)
2751				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2752				    "already in replacing/spare config; wait "
2753				    "for completion or use 'zpool detach'"));
2754			else
2755				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2756				    "cannot replace a replacing device"));
2757		} else {
2758			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2759			    "can only attach to mirrors and top-level "
2760			    "disks"));
2761		}
2762		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2763		break;
2764
2765	case EINVAL:
2766		/*
2767		 * The new device must be a single disk.
2768		 */
2769		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2770		    "new device must be a single disk"));
2771		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2772		break;
2773
2774	case EBUSY:
2775		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2776		    new_disk);
2777		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2778		break;
2779
2780	case EOVERFLOW:
2781		/*
2782		 * The new device is too small.
2783		 */
2784		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2785		    "device is too small"));
2786		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2787		break;
2788
2789	case EDOM:
2790		/*
2791		 * The new device has a different alignment requirement.
2792		 */
2793		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2794		    "devices have different sector alignment"));
2795		(void) zfs_error(hdl, EZFS_BADDEV, msg);
2796		break;
2797
2798	case ENAMETOOLONG:
2799		/*
2800		 * The resulting top-level vdev spec won't fit in the label.
2801		 */
2802		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2803		break;
2804
2805	default:
2806		(void) zpool_standard_error(hdl, errno, msg);
2807	}
2808
2809	return (-1);
2810}
2811
2812/*
2813 * Detach the specified device.
2814 */
2815int
2816zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2817{
2818	zfs_cmd_t zc = { 0 };
2819	char msg[1024];
2820	nvlist_t *tgt;
2821	boolean_t avail_spare, l2cache;
2822	libzfs_handle_t *hdl = zhp->zpool_hdl;
2823
2824	(void) snprintf(msg, sizeof (msg),
2825	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2826
2827	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2828	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
2830		return (zfs_error(hdl, EZFS_NODEVICE, msg));
2831
2832	if (avail_spare)
2833		return (zfs_error(hdl, EZFS_ISSPARE, msg));
2834
2835	if (l2cache)
2836		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2837
2838	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2839
2840	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2841		return (0);
2842
2843	switch (errno) {
2844
2845	case ENOTSUP:
2846		/*
2847		 * Can't detach from this type of vdev.
2848		 */
2849		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2850		    "applicable to mirror and replacing vdevs"));
2851		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
2852		break;
2853
2854	case EBUSY:
2855		/*
2856		 * There are no other replicas of this device.
2857		 */
2858		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2859		break;
2860
2861	default:
2862		(void) zpool_standard_error(hdl, errno, msg);
2863	}
2864
2865	return (-1);
2866}
2867
2868/*
2869 * Find a mirror vdev in the source nvlist.
2870 *
2871 * The mchild array contains a list of disks in one of the top-level mirrors
2872 * of the source pool.  The schild array contains a list of disks that the
2873 * user specified on the command line.  We loop over the mchild array to
2874 * see if any entry in the schild array matches.
2875 *
2876 * If a disk in the mchild array is found in the schild array, we return
2877 * the index of that entry.  Otherwise we return -1.
2878 */
2879static int
2880find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2881    nvlist_t **schild, uint_t schildren)
2882{
2883	uint_t mc;
2884
2885	for (mc = 0; mc < mchildren; mc++) {
2886		uint_t sc;
2887		char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2888		    mchild[mc], B_FALSE);
2889
2890		for (sc = 0; sc < schildren; sc++) {
2891			char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2892			    schild[sc], B_FALSE);
2893			boolean_t result = (strcmp(mpath, spath) == 0);
2894
2895			free(spath);
2896			if (result) {
2897				free(mpath);
2898				return (mc);
2899			}
2900		}
2901
2902		free(mpath);
2903	}
2904
2905	return (-1);
2906}
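
/*
 * For example, if mchild describes mirror children (c0t0d0, c1t0d0) and
 * the user specified c1t0d0, find_vdev_entry() returns 1; if the user
 * named no disk from this mirror, it returns -1 (the device names are
 * hypothetical).
 */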
2907
2908/*
2909 * Split a mirror pool.  If newroot points to null, then a new nvlist
2910 * is generated and it is the responsibility of the caller to free it.
2911 */
2912int
2913zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2914    nvlist_t *props, splitflags_t flags)
2915{
2916	zfs_cmd_t zc = { 0 };
2917	char msg[1024];
2918	nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2919	nvlist_t **varray = NULL, *zc_props = NULL;
2920	uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2921	libzfs_handle_t *hdl = zhp->zpool_hdl;
2922	uint64_t vers;
2923	boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2924	int retval = 0;
2925
2926	(void) snprintf(msg, sizeof (msg),
2927	    dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2928
2929	if (!zpool_name_valid(hdl, B_FALSE, newname))
2930		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2931
2932	if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2933		(void) fprintf(stderr, gettext("Internal error: unable to "
2934		    "retrieve pool configuration\n"));
2935		return (-1);
2936	}
2937
2938	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2939	    == 0);
2940	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2941
2942	if (props) {
2943		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2944		if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2945		    props, vers, flags, msg)) == NULL)
2946			return (-1);
2947	}
2948
2949	if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2950	    &children) != 0) {
2951		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2952		    "Source pool is missing vdev tree"));
2953		if (zc_props)
2954			nvlist_free(zc_props);
2955		return (-1);
2956	}
2957
2958	varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2959	vcount = 0;
2960
2961	if (*newroot == NULL ||
2962	    nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2963	    &newchild, &newchildren) != 0)
2964		newchildren = 0;
2965
2966	for (c = 0; c < children; c++) {
2967		uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2968		char *type;
2969		nvlist_t **mchild, *vdev;
2970		uint_t mchildren;
2971		int entry;
2972
2973		/*
2974		 * Unlike cache & spares, slogs are stored in the
2975		 * ZPOOL_CONFIG_CHILDREN array.  We filter them out here.
2976		 */
2977		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2978		    &is_log);
2979		(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2980		    &is_hole);
2981		if (is_log || is_hole) {
2982			/*
2983			 * Create a hole vdev and put it in the config.
2984			 */
2985			if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2986				goto out;
2987			if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2988			    VDEV_TYPE_HOLE) != 0)
2989				goto out;
2990			if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2991			    1) != 0)
2992				goto out;
2993			if (lastlog == 0)
2994				lastlog = vcount;
2995			varray[vcount++] = vdev;
2996			continue;
2997		}
2998		lastlog = 0;
2999		verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
3000		    == 0);
3001		if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
3002			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3003			    "Source pool must be composed only of mirrors\n"));
3004			retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3005			goto out;
3006		}
3007
3008		verify(nvlist_lookup_nvlist_array(child[c],
3009		    ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
3010
3011		/* find or add an entry for this top-level vdev */
3012		if (newchildren > 0 &&
3013		    (entry = find_vdev_entry(zhp, mchild, mchildren,
3014		    newchild, newchildren)) >= 0) {
3015			/* We found a disk that the user specified. */
3016			vdev = mchild[entry];
3017			++found;
3018		} else {
3019			/* User didn't specify a disk for this vdev. */
3020			vdev = mchild[mchildren - 1];
3021		}
3022
3023		if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
3024			goto out;
3025	}
3026
3027	/* did we find every disk the user specified? */
3028	if (found != newchildren) {
3029		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
3030		    "include at most one disk from each mirror"));
3031		retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
3032		goto out;
3033	}
3034
3035	/* Prepare the nvlist for populating. */
3036	if (*newroot == NULL) {
3037		if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
3038			goto out;
3039		freelist = B_TRUE;
3040		if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
3041		    VDEV_TYPE_ROOT) != 0)
3042			goto out;
3043	} else {
3044		verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
3045	}
3046
3047	/* Add all the children we found */
3048	if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
3049	    lastlog == 0 ? vcount : lastlog) != 0)
3050		goto out;
3051
3052	/*
3053	 * If we're just doing a dry run, exit now with success.
3054	 */
3055	if (flags.dryrun) {
3056		memory_err = B_FALSE;
3057		freelist = B_FALSE;
3058		goto out;
3059	}
3060
3061	/* now build up the config list & call the ioctl */
3062	if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3063		goto out;
3064
3065	if (nvlist_add_nvlist(newconfig,
3066	    ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3067	    nvlist_add_string(newconfig,
3068	    ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3069	    nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3070		goto out;
3071
3072	/*
3073	 * The new pool is automatically part of the namespace unless we
3074	 * explicitly export it.
3075	 */
3076	if (!flags.import)
3077		zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3078	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3079	(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3080	if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3081		goto out;
3082	if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3083		goto out;
3084
3085	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3086		retval = zpool_standard_error(hdl, errno, msg);
3087		goto out;
3088	}
3089
3090	freelist = B_FALSE;
3091	memory_err = B_FALSE;
3092
3093out:
3094	if (varray != NULL) {
3095		int v;
3096
3097		for (v = 0; v < vcount; v++)
3098			nvlist_free(varray[v]);
3099		free(varray);
3100	}
3101	zcmd_free_nvlists(&zc);
3102	if (zc_props)
3103		nvlist_free(zc_props);
3104	if (newconfig)
3105		nvlist_free(newconfig);
3106	if (freelist) {
3107		nvlist_free(*newroot);
3108		*newroot = NULL;
3109	}
3110
3111	if (retval != 0)
3112		return (retval);
3113
3114	if (memory_err)
3115		return (no_memory(hdl));
3116
3117	return (0);
3118}
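
/*
 * A minimal usage sketch: split off the last disk of each mirror and let
 * the kernel export the resulting pool ("newpool" is a hypothetical
 * name):
 *
 *	nvlist_t *newroot = NULL;
 *	splitflags_t flags = { 0 };
 *
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);
 */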
3119
3120/*
 * Remove the given device.  Currently, this is supported only for hot
 * spares, level 2 cache devices, and log devices.
3123 */
3124int
3125zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3126{
3127	zfs_cmd_t zc = { 0 };
3128	char msg[1024];
3129	nvlist_t *tgt;
3130	boolean_t avail_spare, l2cache, islog;
3131	libzfs_handle_t *hdl = zhp->zpool_hdl;
3132	uint64_t version;
3133
3134	(void) snprintf(msg, sizeof (msg),
3135	    dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3136
3137	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3138	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
3140		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3141	/*
3142	 * XXX - this should just go away.
3143	 */
3144	if (!avail_spare && !l2cache && !islog) {
3145		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3146		    "only inactive hot spares, cache, top-level, "
3147		    "or log devices can be removed"));
3148		return (zfs_error(hdl, EZFS_NODEVICE, msg));
3149	}
3150
3151	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3152	if (islog && version < SPA_VERSION_HOLES) {
3153		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
3155		return (zfs_error(hdl, EZFS_BADVERSION, msg));
3156	}
3157
3158	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3159
3160	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3161		return (0);
3162
3163	return (zpool_standard_error(hdl, errno, msg));
3164}
3165
3166/*
3167 * Clear the errors for the pool, or the particular device if specified.
3168 */
3169int
3170zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3171{
3172	zfs_cmd_t zc = { 0 };
3173	char msg[1024];
3174	nvlist_t *tgt;
3175	zpool_rewind_policy_t policy;
3176	boolean_t avail_spare, l2cache;
3177	libzfs_handle_t *hdl = zhp->zpool_hdl;
3178	nvlist_t *nvi = NULL;
3179	int error;
3180
3181	if (path)
3182		(void) snprintf(msg, sizeof (msg),
3183		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3184		    path);
3185	else
3186		(void) snprintf(msg, sizeof (msg),
3187		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3188		    zhp->zpool_name);
3189
3190	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3191	if (path) {
3192		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
3194			return (zfs_error(hdl, EZFS_NODEVICE, msg));
3195
3196		/*
3197		 * Don't allow error clearing for hot spares.  Do allow
3198		 * error clearing for l2cache devices.
3199		 */
3200		if (avail_spare)
3201			return (zfs_error(hdl, EZFS_ISSPARE, msg));
3202
3203		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3204		    &zc.zc_guid) == 0);
3205	}
3206
3207	zpool_get_rewind_policy(rewindnvl, &policy);
3208	zc.zc_cookie = policy.zrp_request;
3209
3210	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3211		return (-1);
3212
3213	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3214		return (-1);
3215
3216	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3217	    errno == ENOMEM) {
3218		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3219			zcmd_free_nvlists(&zc);
3220			return (-1);
3221		}
3222	}
3223
3224	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3225	    errno != EPERM && errno != EACCES)) {
3226		if (policy.zrp_request &
3227		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3228			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3229			zpool_rewind_exclaim(hdl, zc.zc_name,
3230			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3231			    nvi);
3232			nvlist_free(nvi);
3233		}
3234		zcmd_free_nvlists(&zc);
3235		return (0);
3236	}
3237
3238	zcmd_free_nvlists(&zc);
3239	return (zpool_standard_error(hdl, errno, msg));
3240}
3241
3242/*
3243 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3244 */
3245int
3246zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3247{
3248	zfs_cmd_t zc = { 0 };
3249	char msg[1024];
3250	libzfs_handle_t *hdl = zhp->zpool_hdl;
3251
3252	(void) snprintf(msg, sizeof (msg),
3253	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3254	    guid);
3255
3256	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3257	zc.zc_guid = guid;
3258	zc.zc_cookie = ZPOOL_NO_REWIND;
3259
3260	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3261		return (0);
3262
3263	return (zpool_standard_error(hdl, errno, msg));
3264}
3265
3266/*
3267 * Change the GUID for a pool.
3268 */
3269int
3270zpool_reguid(zpool_handle_t *zhp)
3271{
3272	char msg[1024];
3273	libzfs_handle_t *hdl = zhp->zpool_hdl;
3274	zfs_cmd_t zc = { 0 };
3275
3276	(void) snprintf(msg, sizeof (msg),
3277	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3278
3279	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3280	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3281		return (0);
3282
3283	return (zpool_standard_error(hdl, errno, msg));
3284}
3285
3286/*
3287 * Reopen the pool.
3288 */
3289int
3290zpool_reopen(zpool_handle_t *zhp)
3291{
3292	zfs_cmd_t zc = { 0 };
3293	char msg[1024];
3294	libzfs_handle_t *hdl = zhp->zpool_hdl;
3295
3296	(void) snprintf(msg, sizeof (msg),
3297	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3298	    zhp->zpool_name);
3299
3300	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3301	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3302		return (0);
3303	return (zpool_standard_error(hdl, errno, msg));
3304}
3305
3306/*
3307 * Convert from a devid string to a path.
3308 */
3309static char *
3310devid_to_path(char *devid_str)
3311{
3312	ddi_devid_t devid;
3313	char *minor;
3314	char *path;
3315	devid_nmlist_t *list = NULL;
3316	int ret;
3317
3318	if (devid_str_decode(devid_str, &devid, &minor) != 0)
3319		return (NULL);
3320
3321	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3322
3323	devid_str_free(minor);
3324	devid_free(devid);
3325
3326	if (ret != 0)
3327		return (NULL);
3328
3329	if ((path = strdup(list[0].devname)) == NULL)
3330		return (NULL);
3331
3332	devid_free_nmlist(list);
3333
3334	return (path);
3335}
3336
3337/*
3338 * Convert from a path to a devid string.
3339 */
3340static char *
3341path_to_devid(const char *path)
3342{
3343#ifdef have_devid
3344	int fd;
3345	ddi_devid_t devid;
3346	char *minor, *ret;
3347
3348	if ((fd = open(path, O_RDONLY)) < 0)
3349		return (NULL);
3350
3351	minor = NULL;
3352	ret = NULL;
3353	if (devid_get(fd, &devid) == 0) {
3354		if (devid_get_minor_name(fd, &minor) == 0)
3355			ret = devid_str_encode(devid, minor);
3356		if (minor != NULL)
3357			devid_str_free(minor);
3358		devid_free(devid);
3359	}
3360	(void) close(fd);
3361
3362	return (ret);
3363#else
3364	return (NULL);
3365#endif
3366}
3367
3368/*
3369 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
3370 * ignore any failure here, since a common case is for an unprivileged user to
3371 * type 'zpool status', and we'll display the correct information anyway.
3372 */
3373static void
3374set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3375{
3376	zfs_cmd_t zc = { 0 };
3377
3378	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3379	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3380	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3381	    &zc.zc_guid) == 0);
3382
3383	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3384}
3385
3386/*
3387 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
3388 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3389 * We also check if this is a whole disk, in which case we strip off the
3390 * trailing 's0' slice name.
3391 *
3392 * This routine is also responsible for identifying when disks have been
3393 * reconfigured in a new location.  The kernel will have opened the device by
3394 * devid, but the path will still refer to the old location.  To catch this, we
3395 * first do a path -> devid translation (which is fast for the common case).  If
3396 * the devid matches, we're done.  If not, we do a reverse devid -> path
3397 * translation and issue the appropriate ioctl() to update the path of the vdev.
3398 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3399 * of these checks.
3400 */
3401char *
3402zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3403    boolean_t verbose)
3404{
3405	char *path, *devid;
3406	uint64_t value;
3407	char buf[64];
3408	vdev_stat_t *vs;
3409	uint_t vsc;
3410	int have_stats;
3411	int have_path;
3412
3413	have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3414	    (uint64_t **)&vs, &vsc) == 0;
3415	have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
3416
3417	/*
3418	 * If the device is not currently present, assume it will not
3419	 * come back at the same device path.  Display the device by GUID.
3420	 */
3421	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    (have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
3423		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3424		    &value) == 0);
3425		(void) snprintf(buf, sizeof (buf), "%llu",
3426		    (u_longlong_t)value);
3427		path = buf;
3428	} else if (have_path) {
3429
3430		/*
3431		 * If the device is dead (faulted, offline, etc) then don't
3432		 * bother opening it.  Otherwise we may be forcing the user to
3433		 * open a misbehaving device, which can have undesirable
3434		 * effects.
3435		 */
3436		if ((have_stats == 0 ||
3437		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
3438		    zhp != NULL &&
3439		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3440			/*
3441			 * Determine if the current path is correct.
3442			 */
3443			char *newdevid = path_to_devid(path);
3444
3445			if (newdevid == NULL ||
3446			    strcmp(devid, newdevid) != 0) {
3447				char *newpath;
3448
3449				if ((newpath = devid_to_path(devid)) != NULL) {
3450					/*
3451					 * Update the path appropriately.
3452					 */
3453					set_path(zhp, nv, newpath);
3454					if (nvlist_add_string(nv,
3455					    ZPOOL_CONFIG_PATH, newpath) == 0)
3456						verify(nvlist_lookup_string(nv,
3457						    ZPOOL_CONFIG_PATH,
3458						    &path) == 0);
3459					free(newpath);
3460				}
3461			}
3462
3463			if (newdevid)
3464				devid_str_free(newdevid);
3465		}
3466
3467#ifdef sun
3468		if (strncmp(path, "/dev/dsk/", 9) == 0)
3469			path += 9;
3470
3471		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3472		    &value) == 0 && value) {
3473			int pathlen = strlen(path);
3474			char *tmp = zfs_strdup(hdl, path);
3475
3476			/*
3477			 * If it starts with c#, and ends with "s0", chop
3478			 * the "s0" off, or if it ends with "s0/old", remove
3479			 * the "s0" from the middle.
3480			 */
3481			if (CTD_CHECK(tmp)) {
3482				if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3483					tmp[pathlen - 2] = '\0';
3484				} else if (pathlen > 6 &&
3485				    strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3486					(void) strcpy(&tmp[pathlen - 6],
3487					    "/old");
3488				}
3489			}
3490			return (tmp);
3491		}
3492#else	/* !sun */
		if (strncmp(path, _PATH_DEV, sizeof (_PATH_DEV) - 1) == 0)
			path += sizeof (_PATH_DEV) - 1;
3495#endif	/* !sun */
3496	} else {
3497		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3498
3499		/*
3500		 * If it's a raidz device, we need to stick in the parity level.
3501		 */
3502		if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3503			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3504			    &value) == 0);
3505			(void) snprintf(buf, sizeof (buf), "%s%llu", path,
3506			    (u_longlong_t)value);
3507			path = buf;
3508		}
3509
3510		/*
3511		 * We identify each top-level vdev by using a <type-id>
3512		 * naming convention.
3513		 */
3514		if (verbose) {
3515			uint64_t id;
3516
3517			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3518			    &id) == 0);
3519			(void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3520			    (u_longlong_t)id);
3521			path = buf;
3522		}
3523	}
3524
3525	return (zfs_strdup(hdl, path));
3526}
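
/*
 * For example, a whole disk stored as "/dev/dsk/c0t0d0s0" is displayed
 * as "c0t0d0", a missing device is displayed by its guid (e.g.
 * "9203738998099903536", a hypothetical value), and a verbose top-level
 * raidz2 vdev with id 1 is displayed as "raidz2-1".
 */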
3527
3528static int
3529zbookmark_compare(const void *a, const void *b)
3530{
3531	return (memcmp(a, b, sizeof (zbookmark_phys_t)));
3532}
3533
3534/*
3535 * Retrieve the persistent error log, uniquify the members, and return to the
3536 * caller.
3537 */
3538int
3539zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3540{
3541	zfs_cmd_t zc = { 0 };
3542	uint64_t count;
3543	zbookmark_phys_t *zb = NULL;
3544	int i;
3545
3546	/*
3547	 * Retrieve the raw error list from the kernel.  If the number of errors
3548	 * has increased, allocate more space and continue until we get the
3549	 * entire list.
3550	 */
3551	verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3552	    &count) == 0);
3553	if (count == 0)
3554		return (0);
3555	if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3556	    count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL)
3557		return (-1);
3558	zc.zc_nvlist_dst_size = count;
3559	(void) strcpy(zc.zc_name, zhp->zpool_name);
3560	for (;;) {
3561		if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3562		    &zc) != 0) {
3563			free((void *)(uintptr_t)zc.zc_nvlist_dst);
3564			if (errno == ENOMEM) {
3565				void *dst;
3566
3567				count = zc.zc_nvlist_dst_size;
3568				dst = zfs_alloc(zhp->zpool_hdl, count *
3569				    sizeof (zbookmark_phys_t));
3570				if (dst == NULL)
3571					return (-1);
3572				zc.zc_nvlist_dst = (uintptr_t)dst;
3573			} else {
3574				return (-1);
3575			}
3576		} else {
3577			break;
3578		}
3579	}
3580
3581	/*
3582	 * Sort the resulting bookmarks.  This is a little confusing due to the
3583	 * implementation of ZFS_IOC_ERROR_LOG.  The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
3587	 */
3588	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
3589	    zc.zc_nvlist_dst_size;
3590	count -= zc.zc_nvlist_dst_size;
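
	/*
	 * For example, if 'count' was 100 and 38 bookmarks did not fit,
	 * the 62 copied bookmarks occupy the last 62 slots of the array,
	 * so 'zb' points at slot 38 and 'count' becomes 62.
	 */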
3591
3592	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare);
3593
3594	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3595
3596	/*
	 * Fill in the nverrlistp with nvlists of dataset and object numbers.
3598	 */
3599	for (i = 0; i < count; i++) {
3600		nvlist_t *nv;
3601
3602		/* ignoring zb_blkid and zb_level for now */
3603		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3604		    zb[i-1].zb_object == zb[i].zb_object)
3605			continue;
3606
3607		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3608			goto nomem;
3609		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3610		    zb[i].zb_objset) != 0) {
3611			nvlist_free(nv);
3612			goto nomem;
3613		}
3614		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3615		    zb[i].zb_object) != 0) {
3616			nvlist_free(nv);
3617			goto nomem;
3618		}
3619		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3620			nvlist_free(nv);
3621			goto nomem;
3622		}
3623		nvlist_free(nv);
3624	}
3625
3626	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3627	return (0);
3628
3629nomem:
3630	free((void *)(uintptr_t)zc.zc_nvlist_dst);
3631	return (no_memory(zhp->zpool_hdl));
3632}
3633
3634/*
3635 * Upgrade a ZFS pool to the latest on-disk version.
3636 */
3637int
3638zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3639{
3640	zfs_cmd_t zc = { 0 };
3641	libzfs_handle_t *hdl = zhp->zpool_hdl;
3642
3643	(void) strcpy(zc.zc_name, zhp->zpool_name);
3644	zc.zc_cookie = new_version;
3645
3646	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3647		return (zpool_standard_error_fmt(hdl, errno,
3648		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3649		    zhp->zpool_name));
3650	return (0);
3651}
3652
3653void
3654zfs_save_arguments(int argc, char **argv, char *string, int len)
3655{
3656	(void) strlcpy(string, basename(argv[0]), len);
3657	for (int i = 1; i < argc; i++) {
3658		(void) strlcat(string, " ", len);
3659		(void) strlcat(string, argv[i], len);
3660	}
3661}
3662
3663int
3664zpool_log_history(libzfs_handle_t *hdl, const char *message)
3665{
3666	zfs_cmd_t zc = { 0 };
3667	nvlist_t *args;
3668	int err;
3669
3670	args = fnvlist_alloc();
3671	fnvlist_add_string(args, "message", message);
3672	err = zcmd_write_src_nvlist(hdl, &zc, args);
3673	if (err == 0)
3674		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3675	nvlist_free(args);
3676	zcmd_free_nvlists(&zc);
3677	return (err);
3678}
3679
3680/*
3681 * Perform ioctl to get some command history of a pool.
3682 *
3683 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
3684 * logical offset of the history buffer to start reading from.
3685 *
3686 * Upon return, 'off' is the next logical offset to read from and
3687 * 'len' is the actual amount of bytes read into 'buf'.
3688 */
3689static int
3690get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3691{
3692	zfs_cmd_t zc = { 0 };
3693	libzfs_handle_t *hdl = zhp->zpool_hdl;
3694
3695	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3696
3697	zc.zc_history = (uint64_t)(uintptr_t)buf;
3698	zc.zc_history_len = *len;
3699	zc.zc_history_offset = *off;
3700
3701	if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3702		switch (errno) {
3703		case EPERM:
3704			return (zfs_error_fmt(hdl, EZFS_PERM,
3705			    dgettext(TEXT_DOMAIN,
3706			    "cannot show history for pool '%s'"),
3707			    zhp->zpool_name));
3708		case ENOENT:
3709			return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3710			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3711			    "'%s'"), zhp->zpool_name));
3712		case ENOTSUP:
3713			return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3714			    dgettext(TEXT_DOMAIN, "cannot get history for pool "
3715			    "'%s', pool must be upgraded"), zhp->zpool_name));
3716		default:
3717			return (zpool_standard_error_fmt(hdl, errno,
3718			    dgettext(TEXT_DOMAIN,
3719			    "cannot get history for '%s'"), zhp->zpool_name));
3720		}
3721	}
3722
3723	*len = zc.zc_history_len;
3724	*off = zc.zc_history_offset;
3725
3726	return (0);
3727}
3728
3729/*
3730 * Process the buffer of nvlists, unpacking and storing each nvlist record
3731 * into 'records'.  'leftover' is set to the number of bytes that weren't
3732 * processed as there wasn't a complete record.
3733 */
3734int
3735zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3736    nvlist_t ***records, uint_t *numrecords)
3737{
3738	uint64_t reclen;
3739	nvlist_t *nv;
3740	int i;
3741
3742	while (bytes_read > sizeof (reclen)) {
3743
3744		/* get length of packed record (stored as little endian) */
3745		for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3746			reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
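
		/*
		 * For example, a length prefix of 2c 01 00 00 00 00 00 00
		 * decodes to reclen 0x12c, i.e. a 300-byte packed record.
		 */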
3747
3748		if (bytes_read < sizeof (reclen) + reclen)
3749			break;
3750
3751		/* unpack record */
3752		if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3753			return (ENOMEM);
3754		bytes_read -= sizeof (reclen) + reclen;
3755		buf += sizeof (reclen) + reclen;
3756
		/* add record to nvlist array */
		(*numrecords)++;
		if (ISP2(*numrecords + 1)) {
			/* grow the array; don't leak it if realloc() fails */
			nvlist_t **tmp = realloc(*records,
			    *numrecords * 2 * sizeof (nvlist_t *));
			if (tmp == NULL) {
				nvlist_free(nv);
				(*numrecords)--;
				return (ENOMEM);
			}
			*records = tmp;
		}
		(*records)[*numrecords - 1] = nv;
3764	}
3765
3766	*leftover = bytes_read;
3767	return (0);
3768}
3769
3770/* from spa_history.c: spa_history_create_obj() */
3771#define	HIS_BUF_LEN_DEF	(128 << 10)
3772#define	HIS_BUF_LEN_MAX	(1 << 30)
3773
3774/*
3775 * Retrieve the command history of a pool.
3776 */
3777int
3778zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3779{
3780	char *buf = NULL;
3781	uint64_t bufsize = HIS_BUF_LEN_DEF;
3782	uint64_t off = 0;
3783	nvlist_t **records = NULL;
3784	uint_t numrecords = 0;
3785	int err, i;
3786
3787	if ((buf = malloc(bufsize)) == NULL)
3788		return (ENOMEM);
3789	do {
3790		uint64_t bytes_read = bufsize;
3791		uint64_t leftover;
3792
3793		if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3794			break;
3795
3796		/* if nothing else was read in, we're at EOF, just return */
3797		if (bytes_read == 0)
3798			break;
3799
3800		if ((err = zpool_history_unpack(buf, bytes_read,
3801		    &leftover, &records, &numrecords)) != 0)
3802			break;
3803		off -= leftover;
3804
3805		/*
3806		 * If the history block is too big, double the buffer
3807		 * size and try again.
3808		 */
3809		if (leftover == bytes_read) {
3810			free(buf);
3811			buf = NULL;
3812
3813			bufsize <<= 1;
3814			if ((bufsize >= HIS_BUF_LEN_MAX) ||
3815			    ((buf = malloc(bufsize)) == NULL)) {
3816				err = ENOMEM;
3817				break;
3818			}
3819		}
3820
3821		/* CONSTCOND */
3822	} while (1);
3823	free(buf);
3824
3825	if (!err) {
3826		verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3827		verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3828		    records, numrecords) == 0);
3829	}
3830	for (i = 0; i < numrecords; i++)
3831		nvlist_free(records[i]);
3832	free(records);
3833
3834	return (err);
3835}
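
/*
 * A minimal usage sketch; examining the ZPOOL_HIST_RECORD array is left
 * to the caller:
 *
 *	nvlist_t *nvhis;
 *
 *	if (zpool_get_history(zhp, &nvhis) == 0)
 *		nvlist_free(nvhis);
 */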
3836
3837void
3838zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3839    char *pathname, size_t len)
3840{
3841	zfs_cmd_t zc = { 0 };
3842	boolean_t mounted = B_FALSE;
3843	char *mntpnt = NULL;
3844	char dsname[MAXNAMELEN];
3845
3846	if (dsobj == 0) {
3847		/* special case for the MOS */
		(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
		    (u_longlong_t)obj);
3849		return;
3850	}
3851
3852	/* get the dataset's name */
3853	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3854	zc.zc_obj = dsobj;
3855	if (ioctl(zhp->zpool_hdl->libzfs_fd,
3856	    ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3857		/* just write out a path of two object numbers */
3858		(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
3860		return;
3861	}
3862	(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3863
3864	/* find out if the dataset is mounted */
3865	mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3866
3867	/* get the corrupted object's path */
3868	(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3869	zc.zc_obj = obj;
3870	if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3871	    &zc) == 0) {
3872		if (mounted) {
3873			(void) snprintf(pathname, len, "%s%s", mntpnt,
3874			    zc.zc_value);
3875		} else {
3876			(void) snprintf(pathname, len, "%s:%s",
3877			    dsname, zc.zc_value);
3878		}
3879	} else {
		(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
		    (u_longlong_t)obj);
3881	}
3882	free(mntpnt);
3883}
3884
3885#ifdef sun
3886/*
 * Read the EFI label from the config; if a label does not exist then
3888 * pass back the error to the caller. If the caller has passed a non-NULL
3889 * diskaddr argument then we set it to the starting address of the EFI
3890 * partition.
3891 */
3892static int
3893read_efi_label(nvlist_t *config, diskaddr_t *sb)
3894{
3895	char *path;
3896	int fd;
3897	char diskname[MAXPATHLEN];
3898	int err = -1;
3899
3900	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3901		return (err);
3902
3903	(void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3904	    strrchr(path, '/'));
3905	if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3906		struct dk_gpt *vtoc;
3907
3908		if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3909			if (sb != NULL)
3910				*sb = vtoc->efi_parts[0].p_start;
3911			efi_free(vtoc);
3912		}
3913		(void) close(fd);
3914	}
3915	return (err);
3916}
3917
3918/*
 * Determine where a partition starts on a disk in the current
 * configuration.
3921 */
3922static diskaddr_t
3923find_start_block(nvlist_t *config)
3924{
3925	nvlist_t **child;
3926	uint_t c, children;
3927	diskaddr_t sb = MAXOFFSET_T;
3928	uint64_t wholedisk;
3929
3930	if (nvlist_lookup_nvlist_array(config,
3931	    ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3932		if (nvlist_lookup_uint64(config,
3933		    ZPOOL_CONFIG_WHOLE_DISK,
3934		    &wholedisk) != 0 || !wholedisk) {
3935			return (MAXOFFSET_T);
3936		}
3937		if (read_efi_label(config, &sb) < 0)
3938			sb = MAXOFFSET_T;
3939		return (sb);
3940	}
3941
3942	for (c = 0; c < children; c++) {
3943		sb = find_start_block(child[c]);
3944		if (sb != MAXOFFSET_T) {
3945			return (sb);
3946		}
3947	}
3948	return (MAXOFFSET_T);
3949}
3950#endif /* sun */
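/*
 * Editorial note: find_start_block() walks the vdev tree depth-first;
 * for a config such as
 *
 *	root
 *	  mirror
 *	    c0t0d0 (whole_disk)
 *	    c0t1d0 (whole_disk)
 *
 * it returns the EFI partition start of the first whole-disk leaf with
 * a readable label, or MAXOFFSET_T if none has one.
 */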
3951
3952/*
3953 * Label an individual disk.  The name provided is the short name,
3954 * stripped of any leading /dev path.
3955 */
3956int
3957zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name)
3958{
3959#ifdef sun
3960	char path[MAXPATHLEN];
3961	struct dk_gpt *vtoc;
3962	int fd;
3963	size_t resv = EFI_MIN_RESV_SIZE;
3964	uint64_t slice_size;
3965	diskaddr_t start_block;
3966	char errbuf[1024];
3967
3968	/* prepare an error message just in case */
3969	(void) snprintf(errbuf, sizeof (errbuf),
3970	    dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3971
3972	if (zhp) {
3973		nvlist_t *nvroot;
3974
3975		if (zpool_is_bootable(zhp)) {
3976			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3977			    "EFI labeled devices are not supported on root "
3978			    "pools."));
3979			return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3980		}
3981
3982		verify(nvlist_lookup_nvlist(zhp->zpool_config,
3983		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3984
3985		if (zhp->zpool_start_block == 0)
3986			start_block = find_start_block(nvroot);
3987		else
3988			start_block = zhp->zpool_start_block;
3989		zhp->zpool_start_block = start_block;
3990	} else {
3991		/* new pool */
3992		start_block = NEW_START_BLOCK;
3993	}
3994
3995	(void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3996	    BACKUP_SLICE);
3997
3998	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3999		/*
4000		 * This shouldn't happen.  We've long since verified that this
4001		 * is a valid device.
4002		 */
4003		zfs_error_aux(hdl,
4004		    dgettext(TEXT_DOMAIN, "unable to open device"));
4005		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
4006	}
4007
4008	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
4009		/*
4010		 * The only way this can fail is if we run out of memory, or
4011		 * if we were unable to read the disk's capacity.
4012		 */
4013		if (errno == ENOMEM)
4014			(void) no_memory(hdl);
4015
4016		(void) close(fd);
4017		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4018		    "unable to read disk capacity"), name);
4019
4020		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
4021	}
4022
4023	slice_size = vtoc->efi_last_u_lba + 1;
4024	slice_size -= EFI_MIN_RESV_SIZE;
4025	if (start_block == MAXOFFSET_T)
4026		start_block = NEW_START_BLOCK;
4027	slice_size -= start_block;
4028
4029	vtoc->efi_parts[0].p_start = start_block;
4030	vtoc->efi_parts[0].p_size = slice_size;
4031
4032	/*
4033	 * Why we use V_USR: V_BACKUP confuses users, and is considered
4034	 * disposable by some EFI utilities (since EFI doesn't have a backup
4035	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
4036	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
4037	 * etc. were all pretty specific.  V_USR is as close to reality as we
4038	 * can get, in the absence of V_OTHER.
4039	 */
4040	vtoc->efi_parts[0].p_tag = V_USR;
4041	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
4042
4043	vtoc->efi_parts[8].p_start = slice_size + start_block;
4044	vtoc->efi_parts[8].p_size = resv;
4045	vtoc->efi_parts[8].p_tag = V_RESERVED;
4046
4047	if (efi_write(fd, vtoc) != 0) {
4048		/*
4049		 * Some block drivers (like pcata) may not support EFI
4050		 * GPT labels.  Print out a helpful error message
4051		 * directing the user to manually label the disk and
4052		 * provide a specific slice.
4053		 */
4054		(void) close(fd);
4055		efi_free(vtoc);
4056
4057		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4058		    "try using fdisk(1M) and then provide a specific slice"));
4059		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
4060	}
4061
4062	(void) close(fd);
4063	efi_free(vtoc);
4064#endif /* sun */
4065	return (0);
4066}
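/*
 * Editorial sketch of the label written above: for a disk whose last
 * usable LBA is L, slice 0 spans [start_block, L - EFI_MIN_RESV_SIZE]
 * and slice 8 holds the EFI_MIN_RESV_SIZE-sector reserved area that
 * follows it, i.e.
 *
 *	slice 0: p_start = start_block
 *	         p_size  = L + 1 - EFI_MIN_RESV_SIZE - start_block
 *	slice 8: p_start = p_start(0) + p_size(0)
 *	         p_size  = EFI_MIN_RESV_SIZE
 */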
4067
4068static boolean_t
4069supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
4070{
4071	char *type;
4072	nvlist_t **child;
4073	uint_t children, c;
4074
4075	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
4076	if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
4077	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
4078	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
4079		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4080		    "vdev type '%s' is not supported"), type);
4081		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
4082		return (B_FALSE);
4083	}
4084	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
4085	    &child, &children) == 0) {
4086		for (c = 0; c < children; c++) {
4087			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
4088				return (B_FALSE);
4089		}
4090	}
4091	return (B_TRUE);
4092}
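/*
 * Editorial note: the check above is recursive, so e.g. a mirror or
 * raidz top-level vdev is accepted only if every child is also an
 * acceptable type; a mirror with a file-backed side is rejected.
 */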
4093
4094/*
4095 * Check whether this zvol is allowable for use as a dump device;
4096 * returns zero if it is, > 0 if it isn't, and < 0 if it isn't a zvol.
4097 *
4098 * Allowable storage configurations include mirrors, all raidz variants,
4099 * and pools with log, cache, and spare devices.  Pools that are backed
4100 * by files or have missing/hole vdevs are not suitable.
4101 */
4102int
4103zvol_check_dump_config(char *arg)
4104{
4105	zpool_handle_t *zhp = NULL;
4106	nvlist_t *config, *nvroot;
4107	char *p, *volname;
4108	nvlist_t **top;
4109	uint_t toplevels, t;
4110	libzfs_handle_t *hdl;
4111	char errbuf[1024];
4112	char poolname[ZPOOL_MAXNAMELEN];
4113	int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4114	int ret = 1;
4115
4116	if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen) != 0) {
4117		return (-1);
4118	}
4119
4120	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4121	    "dump is not supported on device '%s'"), arg);
4122
4123	if ((hdl = libzfs_init()) == NULL)
4124		return (1);
4125	libzfs_print_on_error(hdl, B_TRUE);
4126
4127	volname = arg + pathlen;
4128
4129	/* check the configuration of the pool */
4130	if ((p = strchr(volname, '/')) == NULL) {
4131		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4132		    "malformed dataset name"));
4133		(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4134		goto out;
4135	} else if (p - volname >= ZFS_MAXNAMELEN) {
4136		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4137		    "dataset name is too long"));
4138		(void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4139		goto out;
4140	} else {
4141		(void) strncpy(poolname, volname, p - volname);
4142		poolname[p - volname] = '\0';
4143	}
4144
4145	if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4146		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4147		    "could not open pool '%s'"), poolname);
4148		(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4149		goto out;
4150	}
4151	config = zpool_get_config(zhp, NULL);
4152	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4153	    &nvroot) != 0) {
4154		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4155		    "could not obtain vdev configuration for  '%s'"), poolname);
4156		(void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4157		goto out;
4158	}
4159
4160	verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4161	    &top, &toplevels) == 0);
4162
4163	for (t = 0; t < toplevels; t++)
4164		if (!supported_dump_vdev_type(hdl, top[t], errbuf))
4165			goto out;
4166	ret = 0;
4167
4168out:
4169	if (zhp)
4170		zpool_close(zhp);
4171	libzfs_fini(hdl);
4172	return (ret);
4173}
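/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): a consumer such as dump administration code validating a
 * candidate dump zvol.  The device path is hypothetical.
 */
#if 0
	if (zvol_check_dump_config("/dev/zvol/dsk/tank/dump") > 0)
		return (-1);	/* unsuitable pool configuration */
#endif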
4174