libzfs_pool.c revision 278177
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>

#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"

static int read_efi_label(nvlist_t *config, diskaddr_t *sb);

#define	DISK_ROOT	"/dev/dsk"
#define	RDISK_ROOT	"/dev/rdsk"
#define	BACKUP_SLICE	"s2"

typedef struct prop_flags {
	int create:1;	/* Validate property on creation */
	int import:1;	/* Validate property on import */
} prop_flags_t;

/*
 * ====================================================================
 * zpool property functions
 * ====================================================================
 */

static int
zpool_get_all_props(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
		if (errno == ENOMEM) {
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				zcmd_free_nvlists(&zc);
				return (-1);
			}
		} else {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zcmd_free_nvlists(&zc);

	return (0);
}

static int
zpool_props_refresh(zpool_handle_t *zhp)
{
	nvlist_t *old_props;

	old_props = zhp->zpool_props;

	if (zpool_get_all_props(zhp) != 0)
		return (-1);

	nvlist_free(old_props);
	return (0);
}

static char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
    zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t ival;
	char *value;
	zprop_source_t source;

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
		source = ival;
		verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
			value = "-";
	}

	if (src)
		*src = source;

	return (value);
}

uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
	nvlist_t *nv, *nvl;
	uint64_t value;
	zprop_source_t source;

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
		/*
		 * zpool_get_all_props() has most likely failed because
		 * the pool is faulted, but if all we need is the top level
		 * vdev's guid then get it from the zhp config nvlist.
		 */
		if ((prop == ZPOOL_PROP_GUID) &&
		    (nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
		    (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
		    == 0)) {
			return (value);
		}
		return (zpool_prop_default_numeric(prop));
	}

	nvl = zhp->zpool_props;
	if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
		verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
		source = value;
		verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
	} else {
		source = ZPROP_SRC_DEFAULT;
		value = zpool_prop_default_numeric(prop);
	}

	if (src)
		*src = source;

	return (value);
}

/*
 * Map VDEV STATE to printed strings.
 */
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
	switch (state) {
	case VDEV_STATE_CLOSED:
	case VDEV_STATE_OFFLINE:
		return (gettext("OFFLINE"));
	case VDEV_STATE_REMOVED:
		return (gettext("REMOVED"));
	case VDEV_STATE_CANT_OPEN:
		if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
			return (gettext("FAULTED"));
		else if (aux == VDEV_AUX_SPLIT_POOL)
			return (gettext("SPLIT"));
		else
			return (gettext("UNAVAIL"));
	case VDEV_STATE_FAULTED:
		return (gettext("FAULTED"));
	case VDEV_STATE_DEGRADED:
		return (gettext("DEGRADED"));
	case VDEV_STATE_HEALTHY:
		return (gettext("ONLINE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Map POOL STATE to printed strings.
 */
const char *
zpool_pool_state_to_name(pool_state_t state)
{
	switch (state) {
	case POOL_STATE_ACTIVE:
		return (gettext("ACTIVE"));
	case POOL_STATE_EXPORTED:
		return (gettext("EXPORTED"));
	case POOL_STATE_DESTROYED:
		return (gettext("DESTROYED"));
	case POOL_STATE_SPARE:
		return (gettext("SPARE"));
	case POOL_STATE_L2CACHE:
		return (gettext("L2CACHE"));
	case POOL_STATE_UNINITIALIZED:
		return (gettext("UNINITIALIZED"));
	case POOL_STATE_UNAVAIL:
		return (gettext("UNAVAIL"));
	case POOL_STATE_POTENTIALLY_ACTIVE:
		return (gettext("POTENTIALLY_ACTIVE"));
	}

	return (gettext("UNKNOWN"));
}

/*
 * Get a zpool property value for 'prop' and return the value in
 * a pre-allocated buffer.
 */
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
    zprop_source_t *srctype, boolean_t literal)
{
	uint64_t intval;
	const char *strval;
	zprop_source_t src = ZPROP_SRC_NONE;
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t vsc;

	if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
		switch (prop) {
		case ZPOOL_PROP_NAME:
			(void) strlcpy(buf, zpool_get_name(zhp), len);
			break;

		case ZPOOL_PROP_HEALTH:
			(void) strlcpy(buf,
			    zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len);
			break;

		case ZPOOL_PROP_GUID:
			intval = zpool_get_prop_int(zhp, prop, &src);
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
			break;

		case ZPOOL_PROP_ALTROOT:
		case ZPOOL_PROP_CACHEFILE:
		case ZPOOL_PROP_COMMENT:
			if (zhp->zpool_props != NULL ||
			    zpool_get_all_props(zhp) == 0) {
				(void) strlcpy(buf,
				    zpool_get_prop_string(zhp, prop, &src),
				    len);
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) strlcpy(buf, "-", len);
			break;
		}

		if (srctype != NULL)
			*srctype = src;
		return (0);
	}

	if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
	    prop != ZPOOL_PROP_NAME)
		return (-1);

	switch (zpool_prop_get_type(prop)) {
	case PROP_TYPE_STRING:
		(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
		    len);
		break;

	case PROP_TYPE_NUMBER:
		intval = zpool_get_prop_int(zhp, prop, &src);

		switch (prop) {
		case ZPOOL_PROP_SIZE:
		case ZPOOL_PROP_ALLOCATED:
		case ZPOOL_PROP_FREE:
		case ZPOOL_PROP_FREEING:
		case ZPOOL_PROP_LEAKED:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_EXPANDSZ:
			if (intval == 0) {
				(void) strlcpy(buf, "-", len);
			} else if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) zfs_nicenum(intval, buf, len);
			}
			break;
		case ZPOOL_PROP_CAPACITY:
			if (literal) {
				(void) snprintf(buf, len, "%llu",
				    (u_longlong_t)intval);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_FRAGMENTATION:
			if (intval == UINT64_MAX) {
				(void) strlcpy(buf, "-", len);
			} else {
				(void) snprintf(buf, len, "%llu%%",
				    (u_longlong_t)intval);
			}
			break;
		case ZPOOL_PROP_DEDUPRATIO:
			(void) snprintf(buf, len, "%llu.%02llux",
			    (u_longlong_t)(intval / 100),
			    (u_longlong_t)(intval % 100));
			break;
		case ZPOOL_PROP_HEALTH:
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
			verify(nvlist_lookup_uint64_array(nvroot,
			    ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
			    == 0);

			(void) strlcpy(buf, zpool_state_to_name(intval,
			    vs->vs_aux), len);
			break;
		case ZPOOL_PROP_VERSION:
			if (intval >= SPA_VERSION_FEATURES) {
				(void) snprintf(buf, len, "-");
				break;
			}
			/* FALLTHROUGH */
		default:
			(void) snprintf(buf, len, "%llu",
			    (u_longlong_t)intval);
		}
		break;

	case PROP_TYPE_INDEX:
		intval = zpool_get_prop_int(zhp, prop, &src);
		if (zpool_prop_index_to_string(prop, intval, &strval)
		    != 0)
			return (-1);
		(void) strlcpy(buf, strval, len);
		break;

	default:
		abort();
	}

	if (srctype)
		*srctype = src;

	return (0);
}
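
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a typical consumer reads a pool property into a caller-supplied buffer,
 * e.g. to print the pool's health:
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	zprop_source_t src;
 *
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    &src, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */
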
/*
 * Check that the bootfs name is within the pool it is being set on.
 * Assumes bootfs is a valid dataset name.
 */
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
	int len = strlen(pool);

	if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
		return (B_FALSE);

	if (strncmp(pool, bootfs, len) == 0 &&
	    (bootfs[len] == '/' || bootfs[len] == '\0'))
		return (B_TRUE);

	return (B_FALSE);
}

/*
 * Inspect the configuration to determine if any of the devices contain
 * an EFI label.
 */
static boolean_t
pool_uses_efi(nvlist_t *config)
{
#ifdef sun
	nvlist_t **child;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (read_efi_label(config, NULL) >= 0);

	for (c = 0; c < children; c++) {
		if (pool_uses_efi(child[c]))
			return (B_TRUE);
	}
#endif	/* sun */
	return (B_FALSE);
}

boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
	char bootfs[ZPOOL_MAXNAMELEN];

	return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
	    sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
	    sizeof (bootfs)) != 0);
}


/*
 * Given an nvlist of zpool properties to be set, validate that they are
 * correct, and parse any numeric properties (index, boolean, etc) if they are
 * specified as strings.
 */
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
    nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
	nvpair_t *elem;
	nvlist_t *retprops;
	zpool_prop_t prop;
	char *strval;
	uint64_t intval;
	char *slash, *check;
	struct stat64 statbuf;
	zpool_handle_t *zhp;
	nvlist_t *nvroot;

	if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
		(void) no_memory(hdl);
		return (NULL);
	}

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		const char *propname = nvpair_name(elem);

		prop = zpool_name_to_prop(propname);
		if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
			int err;
			char *fname = strchr(propname, '@') + 1;

			err = zfeature_lookup_name(fname, NULL);
			if (err != 0) {
				ASSERT3U(err, ==, ENOENT);
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "invalid feature '%s'"), fname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvpair_type(elem) != DATA_TYPE_STRING) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' must be a string"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			(void) nvpair_value_string(elem, &strval);
			if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set to "
				    "'enabled'"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (nvlist_add_uint64(retprops, propname, 0) != 0) {
				(void) no_memory(hdl);
				goto error;
			}
			continue;
		}

		/*
		 * Make sure this property is valid and applies to this type.
		 */
		if (prop == ZPROP_INVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "invalid property '%s'"), propname);
			(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
			goto error;
		}

		if (zpool_prop_readonly(prop)) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
			    "is readonly"), propname);
			(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
			goto error;
		}

		if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
		    &strval, &intval, errbuf) != 0)
			goto error;

		/*
		 * Perform additional checking for specific properties.
		 */
		switch (prop) {
		case ZPOOL_PROP_VERSION:
			if (intval < version ||
			    !SPA_VERSION_IS_SUPPORTED(intval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' number %llu is invalid."),
				    propname, (u_longlong_t)intval);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_BOOTFS:
			if (flags.create || flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' cannot be set at creation "
				    "or import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (version < SPA_VERSION_BOOTFS) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool must be upgraded to support "
				    "'%s' property"), propname);
				(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
				goto error;
			}

			/*
			 * The bootfs property value has to be a dataset name
			 * and the dataset has to be in the same pool that it
			 * is being set on.
			 */
			if (strval[0] != '\0' && !bootfs_name_valid(poolname,
			    strval)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
				    "is an invalid name"), strval);
				(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
				goto error;
			}

			if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "could not open pool '%s'"), poolname);
				(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
				goto error;
			}
			verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
			    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);

#ifdef sun
			/*
			 * bootfs property cannot be set on a disk which has
			 * been EFI labeled.
			 */
			if (pool_uses_efi(nvroot)) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' not supported on "
				    "EFI labeled devices"), propname);
				(void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
				zpool_close(zhp);
				goto error;
			}
#endif	/* sun */
			zpool_close(zhp);
			break;

		case ZPOOL_PROP_ALTROOT:
			if (!flags.create && !flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set during pool "
				    "creation or import"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "bad alternate root '%s'"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' must be empty, an "
				    "absolute path, or 'none'"), propname);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			slash = strrchr(strval, '/');

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid file"), strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '\0';

			if (strval[0] != '\0' &&
			    (stat64(strval, &statbuf) != 0 ||
			    !S_ISDIR(statbuf.st_mode))) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "'%s' is not a valid directory"),
				    strval);
				(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
				goto error;
			}

			*slash = '/';
			break;

		case ZPOOL_PROP_COMMENT:
			for (check = strval; *check != '\0'; check++) {
				if (!isprint(*check)) {
					zfs_error_aux(hdl,
					    dgettext(TEXT_DOMAIN,
					    "comment may only have printable "
					    "characters"));
					(void) zfs_error(hdl, EZFS_BADPROP,
					    errbuf);
					goto error;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "comment must not exceed %d characters"),
				    ZPROP_MAX_COMMENT);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		case ZPOOL_PROP_READONLY:
			if (!flags.import) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "property '%s' can only be set at "
				    "import time"), propname);
				(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
				goto error;
			}
			break;
		}
	}

	return (retprops);
error:
	nvlist_free(retprops);
	return (NULL);
}

/*
 * Set zpool property : propname=propval.
 */
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
	zfs_cmd_t zc = { 0 };
	int ret = -1;
	char errbuf[1024];
	nvlist_t *nvl = NULL;
	nvlist_t *realprops;
	uint64_t version;
	prop_flags_t flags = { 0 };

	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
	    zhp->zpool_name);

	if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
		return (no_memory(zhp->zpool_hdl));

	if (nvlist_add_string(nvl, propname, propval) != 0) {
		nvlist_free(nvl);
		return (no_memory(zhp->zpool_hdl));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
	    zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
		nvlist_free(nvl);
		return (-1);
	}

	nvlist_free(nvl);
	nvl = realprops;

	/*
	 * Execute the corresponding ioctl() to set this property.
	 */
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
		nvlist_free(nvl);
		return (-1);
	}

	ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);

	zcmd_free_nvlists(&zc);
	nvlist_free(nvl);

	if (ret)
		(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
	else
		(void) zpool_props_refresh(zhp);

	return (ret);
}
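
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * setting a property is a single call; zpool_set_prop() validates the
 * name/value pair via zpool_valid_proplist() before issuing the ioctl.
 * This assumes an open pool handle 'zhp' and its libzfs handle 'hdl':
 *
 *	if (zpool_set_prop(zhp, "comment", "scratch pool") != 0)
 *		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));
 */
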
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			free(propname);
			continue;
		}

		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL, B_FALSE) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}

/*
 * Get the state for the given feature on the given ZFS pool.
 */
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
    size_t len)
{
	uint64_t refcount;
	boolean_t found = B_FALSE;
	nvlist_t *features = zpool_get_features(zhp);
	boolean_t supported;
	const char *feature = strchr(propname, '@') + 1;

	supported = zpool_prop_feature(propname);
	ASSERT(supported || zpool_prop_unsupported(propname));

	/*
	 * Convert from feature name to feature guid. This conversion is
	 * unnecessary for unsupported@... properties because they already
	 * use guids.
	 */
	if (supported) {
		int ret;
		spa_feature_t fid;

		ret = zfeature_lookup_name(feature, &fid);
		if (ret != 0) {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
		feature = spa_feature_table[fid].fi_guid;
	}

	if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
		found = B_TRUE;

	if (supported) {
		if (!found) {
			(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
		} else {
			if (refcount == 0)
				(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
			else
				(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
		}
	} else {
		if (found) {
			if (refcount == 0) {
				(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
			} else {
				(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
			}
		} else {
			(void) strlcpy(buf, "-", len);
			return (ENOTSUP);
		}
	}

	return (0);
}
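
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * feature state is queried with the full "feature@..." property name;
 * async_destroy is one example:
 *
 *	char state[ZFS_MAXPROPLEN];
 *
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy",
 *	    state, sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */
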
/*
 * Don't start the slice at the default block of 34; many storage
 * devices will use a stripe width of 128k, so start there instead.
 */
#define	NEW_START_BLOCK	256

/*
 * Validate the given pool name, optionally putting an extended error message
 * in 'buf'.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
	namecheck_err_t why;
	char what;
	int ret;

	ret = pool_namecheck(pool, &why, &what);

	/*
	 * The rules for reserved pool names were extended at a later point.
	 * But we need to support users with existing pools that may now be
	 * invalid. So we only check for this expanded set of names during a
	 * create (or import), and only in userland.
	 */
	if (ret == 0 && !isopen &&
	    (strncmp(pool, "mirror", 6) == 0 ||
	    strncmp(pool, "raidz", 5) == 0 ||
	    strncmp(pool, "spare", 5) == 0 ||
	    strcmp(pool, "log") == 0)) {
		if (hdl != NULL)
			zfs_error_aux(hdl,
			    dgettext(TEXT_DOMAIN, "name is reserved"));
		return (B_FALSE);
	}


	if (ret != 0) {
		if (hdl != NULL) {
			switch (why) {
			case NAME_ERR_TOOLONG:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "name is too long"));
				break;

			case NAME_ERR_INVALCHAR:
				zfs_error_aux(hdl,
				    dgettext(TEXT_DOMAIN, "invalid character "
				    "'%c' in pool name"), what);
				break;

			case NAME_ERR_NOLETTER:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name must begin with a letter"));
				break;

			case NAME_ERR_RESERVED:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "name is reserved"));
				break;

			case NAME_ERR_DISKLIKE:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "pool name is reserved"));
				break;

			case NAME_ERR_LEADING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "leading slash in name"));
				break;

			case NAME_ERR_EMPTY_COMPONENT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "empty component in name"));
				break;

			case NAME_ERR_TRAILING_SLASH:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "trailing slash in name"));
				break;

			case NAME_ERR_MULTIPLE_AT:
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "multiple '@' delimiters in name"));
				break;

			}
		}
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Open a handle to the given pool, even if the pool is currently in the
 * FAULTED state.
 */
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	/*
	 * Make sure the pool name is valid.
	 */
	if (!zpool_name_valid(hdl, B_TRUE, pool)) {
		(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    pool);
		return (NULL);
	}

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (NULL);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (NULL);
	}

	if (missing) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
		(void) zfs_error_fmt(hdl, EZFS_NOENT,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Like the above, but silent on error. Used when iterating over pools (because
 * the configuration cache may be out of date).
 */
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
	zpool_handle_t *zhp;
	boolean_t missing;

	if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
		return (-1);

	zhp->zpool_hdl = hdl;
	(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));

	if (zpool_refresh_stats(zhp, &missing) != 0) {
		zpool_close(zhp);
		return (-1);
	}

	if (missing) {
		zpool_close(zhp);
		*ret = NULL;
		return (0);
	}

	*ret = zhp;
	return (0);
}

/*
 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
 * state.
 */
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp;

	if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
		return (NULL);

	if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
		(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
		    dgettext(TEXT_DOMAIN, "cannot open '%s'"),
		    zhp->zpool_name);
		zpool_close(zhp);
		return (NULL);
	}

	return (zhp);
}

/*
 * Close the handle. Simply frees the memory associated with the handle.
 */
void
zpool_close(zpool_handle_t *zhp)
{
	if (zhp->zpool_config)
		nvlist_free(zhp->zpool_config);
	if (zhp->zpool_old_config)
		nvlist_free(zhp->zpool_old_config);
	if (zhp->zpool_props)
		nvlist_free(zhp->zpool_props);
	free(zhp);
}

/*
 * Return the name of the pool.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}


/*
 * Return the state of the pool (ACTIVE or UNAVAILABLE)
 */
int
zpool_get_state(zpool_handle_t *zhp)
{
	return (zhp->zpool_state);
}
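
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the usual open/use/close pattern for a pool handle, assuming an
 * initialized libzfs handle 'hdl' and a pool named "tank":
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open(hdl, "tank")) == NULL)
 *		return (-1);
 *	(void) printf("%s: state %d\n", zpool_get_name(zhp),
 *	    zpool_get_state(zhp));
 *	zpool_close(zhp);
 */
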
/*
 * Create the named pool, using the provided vdev list. It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}

/*
 * Destroy the given pool. It is up to the caller to ensure that there are no
 * datasets left in the pool.
 */
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	zfs_handle_t *zfp = NULL;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];

	if (zhp->zpool_state == POOL_STATE_ACTIVE &&
	    (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
		return (-1);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot destroy '%s'"), zhp->zpool_name);

		if (errno == EROFS) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
		} else {
			(void) zpool_standard_error(hdl, errno, msg);
		}

		if (zfp)
			zfs_close(zfp);
		return (-1);
	}

	if (zfp) {
		remove_mountpoint(zfp);
		zfs_close(zfp);
	}

	return (0);
}
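
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a minimal create/destroy round trip, assuming 'nvroot' was already built
 * by the caller (zpool(1M) does this with make_root_vdev()):
 *
 *	if (zpool_create(hdl, "tank", nvroot, NULL, NULL) == 0) {
 *		zpool_handle_t *zhp = zpool_open(hdl, "tank");
 *
 *		if (zhp != NULL) {
 *			(void) zpool_destroy(zhp, "destroy example");
 *			zpool_close(zhp);
 *		}
 *	}
 */
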
/*
 * Add the given vdevs to the pool. The caller must have already performed the
 * necessary verification to ensure that the vdev specification is well-formed.
 */
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
	zfs_cmd_t zc = { 0 };
	int ret;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	char msg[1024];
	nvlist_t **spares, **l2cache;
	uint_t nspares, nl2cache;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot add to '%s'"), zhp->zpool_name);

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_SPARES &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
	    &spares, &nspares) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add hot spares"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
		uint64_t s;

		for (s = 0; s < nspares; s++) {
			char *path;

			if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
			    &path) == 0 && pool_uses_efi(spares[s])) {
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device '%s' contains an EFI label and "
				    "cannot be used on root pools."),
				    zpool_vdev_name(hdl, NULL, spares[s],
				    B_FALSE));
				return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
			}
		}
	}

	if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
	    SPA_VERSION_L2CACHE &&
	    nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
	    &l2cache, &nl2cache) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
		    "upgraded to add cache devices"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);
	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times. We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "device is less than the minimum "
				    "size (%s)"), buf);
			}
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		case ENOTSUP:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "pool must be upgraded to add these vdevs"));
			(void) zfs_error(hdl, EZFS_BADVERSION, msg);
			break;

		case EDOM:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "root pool can not have multiple vdevs"
			    " or separate logs"));
			(void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
			break;

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			(void) zfs_error(hdl, EZFS_BADDEV, msg);
			break;

		default:
			(void) zpool_standard_error(hdl, errno, msg);
		}

		ret = -1;
	} else {
		ret = 0;
	}

	zcmd_free_nvlists(&zc);

	return (ret);
}

/*
 * Exports the pool from the system. The caller must ensure that there are no
 * mounted datasets in the pool.
 */
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
    const char *log_str)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot export '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = force;
	zc.zc_guid = hardforce;
	zc.zc_history = (uint64_t)(uintptr_t)log_str;

	if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
		switch (errno) {
		case EXDEV:
			zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
			    "use '-f' to override the following errors:\n"
			    "'%s' has an active shared spare which could be"
			    " used by other pools once '%s' is exported."),
			    zhp->zpool_name, zhp->zpool_name);
			return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
			    msg));
		default:
			return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
			    msg));
		}
	}

	return (0);
}

int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
	return (zpool_export_common(zhp, force, B_FALSE, log_str));
}

int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
	return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
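
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * both wrappers funnel into zpool_export_common(); a plain export that
 * records a history log string looks like:
 *
 *	if (zpool_export(zhp, B_FALSE, "zpool export tank") != 0)
 *		return (-1);
 */
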
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	uint64_t rewindto;
	int64_t loss = -1;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr || config == NULL)
		return;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
		return;
	}

	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		return;
	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		if (dryrun) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Would be able to return %s "
			    "to its state as of %s.\n"),
			    name, timestr);
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "Pool %s returned to its state as of %s.\n"),
			    name, timestr);
		}
		if (loss > 120) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded",
			    (loss + 30) / 60);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "minutes of transactions.\n"));
		} else if (loss > 0) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "%s approximately %lld "),
			    dryrun ? "Would discard" : "Discarded", loss);
			(void) printf(dgettext(TEXT_DOMAIN,
			    "seconds of transactions.\n"));
		}
	}
}

void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
    nvlist_t *config)
{
	nvlist_t *nv = NULL;
	int64_t loss = -1;
	uint64_t edata = UINT64_MAX;
	uint64_t rewindto;
	struct tm t;
	char timestr[128];

	if (!hdl->libzfs_printerr)
		return;

	if (reason >= 0)
		(void) printf(dgettext(TEXT_DOMAIN, "action: "));
	else
		(void) printf(dgettext(TEXT_DOMAIN, "\t"));

	/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
	    nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
	    nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
		goto no_info;

	(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
	    &edata);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery is possible, but will result in some data loss.\n"));

	if (localtime_r((time_t *)&rewindto, &t) != NULL &&
	    strftime(timestr, 128, 0, &t) != 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReturning the pool to its state as of %s\n"
		    "\tshould correct the problem. "),
		    timestr);
	} else {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "\tReverting the pool to an earlier state "
		    "should correct the problem.\n\t"));
	}

	if (loss > 120) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld minutes of data\n"
		    "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
	} else if (loss > 0) {
		(void) printf(dgettext(TEXT_DOMAIN,
		    "Approximately %lld seconds of data\n"
		    "\tmust be discarded, irreversibly. "), loss);
	}
	if (edata != 0 && edata != UINT64_MAX) {
		if (edata == 1) {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, at least\n"
			    "\tone persistent user-data error will remain. "));
		} else {
			(void) printf(dgettext(TEXT_DOMAIN,
			    "After rewind, several\n"
			    "\tpersistent user-data errors will remain. "));
		}
	}
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
	    reason >= 0 ? "clear" : "import", name);

	(void) printf(dgettext(TEXT_DOMAIN,
	    "A scrub of the pool\n"
	    "\tis strongly recommended after recovery.\n"));
	return;

no_info:
	(void) printf(dgettext(TEXT_DOMAIN,
	    "Destroy and re-create the pool from\n\ta backup source.\n"));
}

/*
 * zpool_import() is a contracted interface. Should be kept the same
 * if possible.
 *
 * Applications should use zpool_import_props() to import a pool with
 * new property values to be set.
 */
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    char *altroot)
{
	nvlist_t *props = NULL;
	int ret;

	if (altroot != NULL) {
		if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}

		if (nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
		    nvlist_add_string(props,
		    zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
			nvlist_free(props);
			return (zfs_error_fmt(hdl, EZFS_NOMEM,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		}
	}

	ret = zpool_import_props(hdl, config, newname, props,
	    ZFS_IMPORT_NORMAL);
	if (props)
		nvlist_free(props);
	return (ret);
}

static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
    int indent)
{
	nvlist_t **child;
	uint_t c, children;
	char *vname;
	uint64_t is_log = 0;

	(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
	    &is_log);

	if (name != NULL)
		(void) printf("\t%*s%s%s\n", indent, "", name,
		    is_log ? " [log]" : "");

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return;

	for (c = 0; c < children; c++) {
		vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
		print_vdev_tree(hdl, vname, child[c], indent + 2);
		free(vname);
	}
}

void
zpool_print_unsup_feat(nvlist_t *config)
{
	nvlist_t *nvinfo, *unsup_feat;

	verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
	    0);
	verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
	    &unsup_feat) == 0);

	for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
	    nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
		char *desc;

		verify(nvpair_type(nvp) == DATA_TYPE_STRING);
		verify(nvpair_value_string(nvp, &desc) == 0);

		if (strlen(desc) > 0)
			(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
		else
			(void) printf("\t%s\n", nvpair_name(nvp));
	}
}
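
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * given a 'config' found by zpool_find_import(), a caller imports the pool
 * under an alternate root like so:
 *
 *	if (zpool_import(hdl, config, NULL, "/mnt") != 0)
 *		return (-1);
 */
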
/*
 * Import the given pool using the known configuration and a list of
 * properties to be set. The configuration should have come from
 * zpool_find_import(). The 'newname' parameter controls whether the pool
 * is imported with a different name.
 */
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
    nvlist_t *props, int flags)
{
	zfs_cmd_t zc = { 0 };
	zpool_rewind_policy_t policy;
	nvlist_t *nv = NULL;
	nvlist_t *nvinfo = NULL;
	nvlist_t *missing = NULL;
	char *thename;
	char *origname;
	int ret;
	int error = 0;
	char errbuf[1024];

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
	    &origname) == 0);

	(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
	    "cannot import pool '%s'"), origname);

	if (newname != NULL) {
		if (!zpool_name_valid(hdl, B_FALSE, newname))
			return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    newname));
		thename = (char *)newname;
	} else {
		thename = origname;
	}

	if (props != NULL) {
		uint64_t version;
		prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };

		verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
		    &version) == 0);

		if ((props = zpool_valid_proplist(hdl, origname,
		    props, version, flags, errbuf)) == NULL)
			return (-1);
		if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
			nvlist_free(props);
			return (-1);
		}
		nvlist_free(props);
	}

	(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));

	verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
	    &zc.zc_guid) == 0);

	if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}
	if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
		zcmd_free_nvlists(&zc);
		return (-1);
	}

	zc.zc_cookie = flags;
	while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}
	if (ret != 0)
		error = errno;

	(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);

	zcmd_free_nvlists(&zc);

	zpool_get_rewind_policy(config, &policy);

	if (error) {
		char desc[1024];

		/*
		 * Dry-run failed, but we print out what success
		 * looks like if we found a best txg.
		 */
		if (policy.zrp_request & ZPOOL_TRY_REWIND) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    B_TRUE, nv);
			nvlist_free(nv);
			return (-1);
		}

		if (newname == NULL)
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s'"),
			    thename);
		else
			(void) snprintf(desc, sizeof (desc),
			    dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
			    origname, thename);

		switch (error) {
		case ENOTSUP:
			if (nv != NULL && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
				(void) printf(dgettext(TEXT_DOMAIN, "This "
				    "pool uses the following feature(s) not "
				    "supported by this system:\n"));
				zpool_print_unsup_feat(nv);
				if (nvlist_exists(nvinfo,
				    ZPOOL_CONFIG_CAN_RDONLY)) {
					(void) printf(dgettext(TEXT_DOMAIN,
					    "All unsupported features are only "
					    "required for writing to the pool."
					    "\nThe pool can be imported using "
					    "'-o readonly=on'.\n"));
				}
			}
			/*
			 * Unsupported version.
			 */
			(void) zfs_error(hdl, EZFS_BADVERSION, desc);
			break;

		case EINVAL:
			(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
			break;

		case EROFS:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is read only"));
			(void) zfs_error(hdl, EZFS_BADDEV, desc);
			break;

		case ENXIO:
			if (nv && nvlist_lookup_nvlist(nv,
			    ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
			    nvlist_lookup_nvlist(nvinfo,
			    ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
				(void) printf(dgettext(TEXT_DOMAIN,
				    "The devices below are missing, use "
				    "'-m' to import the pool anyway:\n"));
				print_vdev_tree(hdl, NULL, missing, 2);
				(void) printf("\n");
			}
			(void) zpool_standard_error(hdl, error, desc);
			break;

		case EEXIST:
			(void) zpool_standard_error(hdl, error, desc);
			break;

		default:
			(void) zpool_standard_error(hdl, error, desc);
			zpool_explain_recover(hdl,
			    newname ? origname : thename, -error, nv);
			break;
		}

		nvlist_free(nv);
		ret = -1;
	} else {
		zpool_handle_t *zhp;

		/*
		 * This should never fail, but play it safe anyway.
		 */
		if (zpool_open_silent(hdl, thename, &zhp) != 0)
			ret = -1;
		else if (zhp != NULL)
			zpool_close(zhp);
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			zpool_rewind_exclaim(hdl, newname ? origname : thename,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
		}
		nvlist_free(nv);
		return (0);
	}

	return (ret);
}

/*
 * Scan the pool.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
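
/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * starting a scrub, and cancelling it again with POOL_SCAN_NONE:
 *
 *	if (zpool_scan(zhp, POOL_SCAN_SCRUB) != 0)
 *		return (-1);
 *	...
 *	(void) zpool_scan(zhp, POOL_SCAN_NONE);
 */
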
#ifdef illumos
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 */
#define	CTD_CHECK(str)	(str && str[0] == 'c' && isdigit(str[1]))

/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str && str[0] == '/') {
		char *tmp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (tmp != str && strcmp(tmp, "/old") == 0) {
			for (tmp--; *tmp != '/'; tmp--)
				;
		}
		str = tmp + 1;
	}
	return (CTD_CHECK(str));
}
#endif

/*
 * Find a vdev that matches the search criteria specified. We use the
 * nvpair name to determine how we should look for the device.
 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
 * spare; but FALSE if it's an INUSE spare.
 */
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	uint_t c, children;
	nvlist_t **child;
	nvlist_t *ret;
	uint64_t is_log;
	char *srchkey;
	nvpair_t *pair = nvlist_next_nvpair(search, NULL);

	/* Nothing to look for */
	if (search == NULL || pair == NULL)
		return (NULL);

	/* Obtain the key we will use to search */
	srchkey = nvpair_name(pair);

	switch (nvpair_type(pair)) {
	case DATA_TYPE_UINT64:
		if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
			uint64_t srchval, theguid;

			verify(nvpair_value_uint64(pair, &srchval) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
			    &theguid) == 0);
			if (theguid == srchval)
				return (nv);
		}
		break;

	case DATA_TYPE_STRING: {
		char *srchval, *val;

		verify(nvpair_value_string(pair, &srchval) == 0);
		if (nvlist_lookup_string(nv, srchkey, &val) != 0)
			break;

		/*
		 * Search for the requested value. Special cases:
		 *
		 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
		 *   "s0" or "s0/old". The "s0" part is hidden from the user,
		 *   but included in the string, so this matches around it.
		 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
		 *
		 * Otherwise, all other searches are simple string compares.
		 */
#ifdef illumos
		if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
		    ctd_check_path(val)) {
			uint64_t wholedisk = 0;

			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
			    &wholedisk);
			if (wholedisk) {
				int slen = strlen(srchval);
				int vlen = strlen(val);

				if (slen != vlen - 2)
					break;

				/*
				 * make_leaf_vdev() should only set
				 * wholedisk for ZPOOL_CONFIG_PATHs which
				 * will include "/dev/dsk/", giving plenty of
				 * room for the indices used next.
				 */
				ASSERT(vlen >= 6);

				/*
				 * strings identical except trailing "s0"
				 */
				if (strcmp(&val[vlen - 2], "s0") == 0 &&
				    strncmp(srchval, val, slen) == 0)
					return (nv);

				/*
				 * strings identical except trailing "s0/old"
				 */
				if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
				    strcmp(&srchval[slen - 4], "/old") == 0 &&
				    strncmp(srchval, val, slen - 4) == 0)
					return (nv);

				break;
			}
		} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
#else
		if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
#endif
			char *type, *idx, *end, *p;
			uint64_t id, vdev_id;

			/*
			 * Determine our vdev type, keeping in mind
			 * that the srchval is composed of a type and
			 * vdev id pair (i.e. mirror-4).
			 */
			if ((type = strdup(srchval)) == NULL)
				return (NULL);

			if ((p = strrchr(type, '-')) == NULL) {
				free(type);
				break;
			}
			idx = p + 1;
			*p = '\0';

			/*
			 * If the types don't match then keep looking.
			 */
			if (strncmp(val, type, strlen(val)) != 0) {
				free(type);
				break;
			}

			verify(strncmp(type, VDEV_TYPE_RAIDZ,
			    strlen(VDEV_TYPE_RAIDZ)) == 0 ||
			    strncmp(type, VDEV_TYPE_MIRROR,
			    strlen(VDEV_TYPE_MIRROR)) == 0);
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
			    &id) == 0);

			errno = 0;
			vdev_id = strtoull(idx, &end, 10);

			free(type);
			if (errno != 0)
				return (NULL);

			/*
			 * Now verify that we have the correct vdev id.
			 */
			if (vdev_id == id)
				return (nv);
		}

		/*
		 * Common case
		 */
		if (strcmp(srchval, val) == 0)
			return (nv);
		break;
	}

	default:
		break;
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0)
		return (NULL);

	for (c = 0; c < children; c++) {
		if ((ret = vdev_to_nvlist_iter(child[c], search,
		    avail_spare, l2cache, NULL)) != NULL) {
			/*
			 * The 'is_log' value is only set for the toplevel
			 * vdev, not the leaf vdevs. So we always lookup the
			 * log device from the root of the vdev tree (where
			 * 'log' is non-NULL).
			 */
			if (log != NULL &&
			    nvlist_lookup_uint64(child[c],
			    ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
			    is_log) {
				*log = B_TRUE;
			}
			return (ret);
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*avail_spare = B_TRUE;
				return (ret);
			}
		}
	}

	if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if ((ret = vdev_to_nvlist_iter(child[c], search,
			    avail_spare, l2cache, NULL)) != NULL) {
				*l2cache = B_TRUE;
				return (ret);
			}
		}
	}

	return (NULL);
}

/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}

2186 */ 2187boolean_t 2188zpool_vdev_is_interior(const char *name) 2189{ 2190 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2191 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2192 return (B_TRUE); 2193 return (B_FALSE); 2194} 2195 2196nvlist_t * 2197zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2198 boolean_t *l2cache, boolean_t *log) 2199{ 2200 char buf[MAXPATHLEN]; 2201 char *end; 2202 nvlist_t *nvroot, *search, *ret; 2203 uint64_t guid; 2204 2205 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2206 2207 guid = strtoull(path, &end, 10); 2208 if (guid != 0 && *end == '\0') { 2209 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2210 } else if (zpool_vdev_is_interior(path)) { 2211 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2212 } else if (path[0] != '/') { 2213 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2214 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2215 } else { 2216 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2217 } 2218 2219 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2220 &nvroot) == 0); 2221 2222 *avail_spare = B_FALSE; 2223 *l2cache = B_FALSE; 2224 if (log != NULL) 2225 *log = B_FALSE; 2226 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2227 nvlist_free(search); 2228 2229 return (ret); 2230} 2231 2232static int 2233vdev_online(nvlist_t *nv) 2234{ 2235 uint64_t ival; 2236 2237 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2238 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2239 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2240 return (0); 2241 2242 return (1); 2243} 2244 2245/* 2246 * Helper function for zpool_get_physpaths(). 2247 */ 2248static int 2249vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2250 size_t *bytes_written) 2251{ 2252 size_t bytes_left, pos, rsz; 2253 char *tmppath; 2254 const char *format; 2255 2256 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2257 &tmppath) != 0) 2258 return (EZFS_NODEVICE); 2259 2260 pos = *bytes_written; 2261 bytes_left = physpath_size - pos; 2262 format = (pos == 0) ? "%s" : " %s"; 2263 2264 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2265 *bytes_written += rsz; 2266 2267 if (rsz >= bytes_left) { 2268 /* if physpath was not copied properly, clear it */ 2269 if (bytes_left != 0) { 2270 physpath[pos] = 0; 2271 } 2272 return (EZFS_NOSPC); 2273 } 2274 return (0); 2275} 2276 2277static int 2278vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2279 size_t *rsz, boolean_t is_spare) 2280{ 2281 char *type; 2282 int ret; 2283 2284 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2285 return (EZFS_INVALCONFIG); 2286 2287 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2288 /* 2289 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2290 * For a spare vdev, we only want to boot from the active 2291 * spare device. 
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}

/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * A root pool cannot have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}

/*
 * Get phys_path for a root pool
 * Return 0 on success; non-zero on failure.
 */
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
	return (zpool_get_config_physpath(zhp->zpool_config, physpath,
	    phypath_size));
}

/*
 * If the device has been dynamically expanded then we need to relabel
 * the disk to use the new unallocated space.
 */
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
{
#ifdef sun
	char path[MAXPATHLEN];
	char errbuf[1024];
	int fd, error;
	int (*_efi_use_whole_disk)(int);

	if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
	    "efi_use_whole_disk")) == NULL)
		return (-1);

	/* prepare an error message just in case */
	(void) snprintf(errbuf, sizeof (errbuf),
	    dgettext(TEXT_DOMAIN, "cannot relabel '%s'"), name);

	(void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);

	if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	/*
	 * It's possible that we might encounter an error if the device
	 * does not have any unallocated space left.  If so, we simply
	 * ignore that error and continue on.
2410 */ 2411 error = _efi_use_whole_disk(fd); 2412 (void) close(fd); 2413 if (error && error != VT_ENOSPC) { 2414 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2415 "relabel '%s': unable to read disk capacity"), name); 2416 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2417 } 2418#endif /* sun */ 2419 return (0); 2420} 2421 2422/* 2423 * Bring the specified vdev online. The 'flags' parameter is a set of the 2424 * ZFS_ONLINE_* flags. 2425 */ 2426int 2427zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2428 vdev_state_t *newstate) 2429{ 2430 zfs_cmd_t zc = { 0 }; 2431 char msg[1024]; 2432 nvlist_t *tgt; 2433 boolean_t avail_spare, l2cache, islog; 2434 libzfs_handle_t *hdl = zhp->zpool_hdl; 2435 2436 if (flags & ZFS_ONLINE_EXPAND) { 2437 (void) snprintf(msg, sizeof (msg), 2438 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2439 } else { 2440 (void) snprintf(msg, sizeof (msg), 2441 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2442 } 2443 2444 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2445 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2446 &islog)) == NULL) 2447 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2448 2449 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2450 2451 if (avail_spare) 2452 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2453 2454 if (flags & ZFS_ONLINE_EXPAND || 2455 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2456 char *pathname = NULL; 2457 uint64_t wholedisk = 0; 2458 2459 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2460 &wholedisk); 2461 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2462 &pathname) == 0); 2463 2464 /* 2465 * XXX - L2ARC 1.0 devices can't support expansion. 2466 */ 2467 if (l2cache) { 2468 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2469 "cannot expand cache devices")); 2470 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2471 } 2472 2473 if (wholedisk) { 2474 pathname += strlen(DISK_ROOT) + 1; 2475 (void) zpool_relabel_disk(hdl, pathname); 2476 } 2477 } 2478 2479 zc.zc_cookie = VDEV_STATE_ONLINE; 2480 zc.zc_obj = flags; 2481 2482 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2483 if (errno == EINVAL) { 2484 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2485 "from this pool into a new one. Use '%s' " 2486 "instead"), "zpool detach"); 2487 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2488 } 2489 return (zpool_standard_error(hdl, errno, msg)); 2490 } 2491 2492 *newstate = zc.zc_cookie; 2493 return (0); 2494} 2495 2496/* 2497 * Take the specified vdev offline 2498 */ 2499int 2500zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2501{ 2502 zfs_cmd_t zc = { 0 }; 2503 char msg[1024]; 2504 nvlist_t *tgt; 2505 boolean_t avail_spare, l2cache; 2506 libzfs_handle_t *hdl = zhp->zpool_hdl; 2507 2508 (void) snprintf(msg, sizeof (msg), 2509 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2510 2511 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2512 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2513 NULL)) == NULL) 2514 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2515 2516 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2517 2518 if (avail_spare) 2519 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2520 2521 zc.zc_cookie = VDEV_STATE_OFFLINE; 2522 zc.zc_obj = istmp ? 
ZFS_OFFLINE_TEMPORARY : 0; 2523 2524 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2525 return (0); 2526 2527 switch (errno) { 2528 case EBUSY: 2529 2530 /* 2531 * There are no other replicas of this device. 2532 */ 2533 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2534 2535 case EEXIST: 2536 /* 2537 * The log device has unplayed logs 2538 */ 2539 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2540 2541 default: 2542 return (zpool_standard_error(hdl, errno, msg)); 2543 } 2544} 2545 2546/* 2547 * Mark the given vdev faulted. 2548 */ 2549int 2550zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2551{ 2552 zfs_cmd_t zc = { 0 }; 2553 char msg[1024]; 2554 libzfs_handle_t *hdl = zhp->zpool_hdl; 2555 2556 (void) snprintf(msg, sizeof (msg), 2557 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2558 2559 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2560 zc.zc_guid = guid; 2561 zc.zc_cookie = VDEV_STATE_FAULTED; 2562 zc.zc_obj = aux; 2563 2564 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2565 return (0); 2566 2567 switch (errno) { 2568 case EBUSY: 2569 2570 /* 2571 * There are no other replicas of this device. 2572 */ 2573 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2574 2575 default: 2576 return (zpool_standard_error(hdl, errno, msg)); 2577 } 2578 2579} 2580 2581/* 2582 * Mark the given vdev degraded. 2583 */ 2584int 2585zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2586{ 2587 zfs_cmd_t zc = { 0 }; 2588 char msg[1024]; 2589 libzfs_handle_t *hdl = zhp->zpool_hdl; 2590 2591 (void) snprintf(msg, sizeof (msg), 2592 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2593 2594 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2595 zc.zc_guid = guid; 2596 zc.zc_cookie = VDEV_STATE_DEGRADED; 2597 zc.zc_obj = aux; 2598 2599 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2600 return (0); 2601 2602 return (zpool_standard_error(hdl, errno, msg)); 2603} 2604 2605/* 2606 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2607 * a hot spare. 2608 */ 2609static boolean_t 2610is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2611{ 2612 nvlist_t **child; 2613 uint_t c, children; 2614 char *type; 2615 2616 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2617 &children) == 0) { 2618 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2619 &type) == 0); 2620 2621 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2622 children == 2 && child[which] == tgt) 2623 return (B_TRUE); 2624 2625 for (c = 0; c < children; c++) 2626 if (is_replacing_spare(child[c], tgt, which)) 2627 return (B_TRUE); 2628 } 2629 2630 return (B_FALSE); 2631} 2632 2633/* 2634 * Attach new_disk (fully described by nvroot) to old_disk. 2635 * If 'replacing' is specified, the new disk will replace the old one. 
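 *
 * A hedged usage sketch (editor's illustration, not from the original
 * source; 'nvroot' would normally be built by the zpool(1M) command's
 * vdev-construction code, which lives outside this file):
 *
 *	if (zpool_vdev_attach(zhp, "da0", "da1", nvroot, 1) == 0)
 *		...replacement of da0 by da1 (a resilver) has started...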
2636 */ 2637int 2638zpool_vdev_attach(zpool_handle_t *zhp, 2639 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2640{ 2641 zfs_cmd_t zc = { 0 }; 2642 char msg[1024]; 2643 int ret; 2644 nvlist_t *tgt; 2645 boolean_t avail_spare, l2cache, islog; 2646 uint64_t val; 2647 char *newname; 2648 nvlist_t **child; 2649 uint_t children; 2650 nvlist_t *config_root; 2651 libzfs_handle_t *hdl = zhp->zpool_hdl; 2652 boolean_t rootpool = zpool_is_bootable(zhp); 2653 2654 if (replacing) 2655 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2656 "cannot replace %s with %s"), old_disk, new_disk); 2657 else 2658 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2659 "cannot attach %s to %s"), new_disk, old_disk); 2660 2661 /* 2662 * If this is a root pool, make sure that we're not attaching an 2663 * EFI labeled device. 2664 */ 2665 if (rootpool && pool_uses_efi(nvroot)) { 2666 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2667 "EFI labeled devices are not supported on root pools.")); 2668 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg)); 2669 } 2670 2671 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2672 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2673 &islog)) == 0) 2674 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2675 2676 if (avail_spare) 2677 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2678 2679 if (l2cache) 2680 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2681 2682 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2683 zc.zc_cookie = replacing; 2684 2685 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2686 &child, &children) != 0 || children != 1) { 2687 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2688 "new device must be a single disk")); 2689 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2690 } 2691 2692 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2693 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2694 2695 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2696 return (-1); 2697 2698 /* 2699 * If the target is a hot spare that has been swapped in, we can only 2700 * replace it with another hot spare. 2701 */ 2702 if (replacing && 2703 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2704 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2705 NULL) == NULL || !avail_spare) && 2706 is_replacing_spare(config_root, tgt, 1)) { 2707 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2708 "can only be replaced by another hot spare")); 2709 free(newname); 2710 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2711 } 2712 2713 free(newname); 2714 2715 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2716 return (-1); 2717 2718 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2719 2720 zcmd_free_nvlists(&zc); 2721 2722 if (ret == 0) { 2723 if (rootpool) { 2724 /* 2725 * XXX need a better way to prevent user from 2726 * booting up a half-baked vdev. 
2727 */ 2728 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2729 "sure to wait until resilver is done " 2730 "before rebooting.\n")); 2731 (void) fprintf(stderr, "\n"); 2732 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2733 "you boot from pool '%s', you may need to update\n" 2734 "boot code on newly attached disk '%s'.\n\n" 2735 "Assuming you use GPT partitioning and 'da0' is " 2736 "your new boot disk\n" 2737 "you may use the following command:\n\n" 2738 "\tgpart bootcode -b /boot/pmbr -p " 2739 "/boot/gptzfsboot -i 1 da0\n\n"), 2740 zhp->zpool_name, new_disk); 2741 } 2742 return (0); 2743 } 2744 2745 switch (errno) { 2746 case ENOTSUP: 2747 /* 2748 * Can't attach to or replace this type of vdev. 2749 */ 2750 if (replacing) { 2751 uint64_t version = zpool_get_prop_int(zhp, 2752 ZPOOL_PROP_VERSION, NULL); 2753 2754 if (islog) 2755 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2756 "cannot replace a log with a spare")); 2757 else if (version >= SPA_VERSION_MULTI_REPLACE) 2758 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2759 "already in replacing/spare config; wait " 2760 "for completion or use 'zpool detach'")); 2761 else 2762 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2763 "cannot replace a replacing device")); 2764 } else { 2765 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2766 "can only attach to mirrors and top-level " 2767 "disks")); 2768 } 2769 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2770 break; 2771 2772 case EINVAL: 2773 /* 2774 * The new device must be a single disk. 2775 */ 2776 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2777 "new device must be a single disk")); 2778 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2779 break; 2780 2781 case EBUSY: 2782 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2783 new_disk); 2784 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2785 break; 2786 2787 case EOVERFLOW: 2788 /* 2789 * The new device is too small. 2790 */ 2791 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2792 "device is too small")); 2793 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2794 break; 2795 2796 case EDOM: 2797 /* 2798 * The new device has a different alignment requirement. 2799 */ 2800 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2801 "devices have different sector alignment")); 2802 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2803 break; 2804 2805 case ENAMETOOLONG: 2806 /* 2807 * The resulting top-level vdev spec won't fit in the label. 2808 */ 2809 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2810 break; 2811 2812 default: 2813 (void) zpool_standard_error(hdl, errno, msg); 2814 } 2815 2816 return (-1); 2817} 2818 2819/* 2820 * Detach the specified device. 
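 *
 * (Editor's note, illustrative: zpool_vdev_detach(zhp, "da1") is the
 * library call behind 'zpool detach <pool> da1'; spares and l2cache
 * devices are refused below with EZFS_ISSPARE/EZFS_ISL2CACHE.)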
2821 */ 2822int 2823zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2824{ 2825 zfs_cmd_t zc = { 0 }; 2826 char msg[1024]; 2827 nvlist_t *tgt; 2828 boolean_t avail_spare, l2cache; 2829 libzfs_handle_t *hdl = zhp->zpool_hdl; 2830 2831 (void) snprintf(msg, sizeof (msg), 2832 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2833 2834 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2835 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2836 NULL)) == 0) 2837 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2838 2839 if (avail_spare) 2840 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2841 2842 if (l2cache) 2843 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2844 2845 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2846 2847 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2848 return (0); 2849 2850 switch (errno) { 2851 2852 case ENOTSUP: 2853 /* 2854 * Can't detach from this type of vdev. 2855 */ 2856 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2857 "applicable to mirror and replacing vdevs")); 2858 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2859 break; 2860 2861 case EBUSY: 2862 /* 2863 * There are no other replicas of this device. 2864 */ 2865 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2866 break; 2867 2868 default: 2869 (void) zpool_standard_error(hdl, errno, msg); 2870 } 2871 2872 return (-1); 2873} 2874 2875/* 2876 * Find a mirror vdev in the source nvlist. 2877 * 2878 * The mchild array contains a list of disks in one of the top-level mirrors 2879 * of the source pool. The schild array contains a list of disks that the 2880 * user specified on the command line. We loop over the mchild array to 2881 * see if any entry in the schild array matches. 2882 * 2883 * If a disk in the mchild array is found in the schild array, we return 2884 * the index of that entry. Otherwise we return -1. 2885 */ 2886static int 2887find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2888 nvlist_t **schild, uint_t schildren) 2889{ 2890 uint_t mc; 2891 2892 for (mc = 0; mc < mchildren; mc++) { 2893 uint_t sc; 2894 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2895 mchild[mc], B_FALSE); 2896 2897 for (sc = 0; sc < schildren; sc++) { 2898 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2899 schild[sc], B_FALSE); 2900 boolean_t result = (strcmp(mpath, spath) == 0); 2901 2902 free(spath); 2903 if (result) { 2904 free(mpath); 2905 return (mc); 2906 } 2907 } 2908 2909 free(mpath); 2910 } 2911 2912 return (-1); 2913} 2914 2915/* 2916 * Split a mirror pool. If newroot points to null, then a new nvlist 2917 * is generated and it is the responsibility of the caller to free it. 
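 *
 * A minimal dry-run sketch (editor's illustration, not from the original
 * source; error handling and property lists omitted):
 *
 *	splitflags_t flags = { 0 };
 *	nvlist_t *newroot = NULL;
 *
 *	flags.dryrun = 1;
 *	if (zpool_vdev_split(zhp, "newpool", &newroot, NULL, flags) == 0)
 *		nvlist_free(newroot);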
2918 */ 2919int 2920zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2921 nvlist_t *props, splitflags_t flags) 2922{ 2923 zfs_cmd_t zc = { 0 }; 2924 char msg[1024]; 2925 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2926 nvlist_t **varray = NULL, *zc_props = NULL; 2927 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2928 libzfs_handle_t *hdl = zhp->zpool_hdl; 2929 uint64_t vers; 2930 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2931 int retval = 0; 2932 2933 (void) snprintf(msg, sizeof (msg), 2934 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2935 2936 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2937 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2938 2939 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2940 (void) fprintf(stderr, gettext("Internal error: unable to " 2941 "retrieve pool configuration\n")); 2942 return (-1); 2943 } 2944 2945 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2946 == 0); 2947 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2948 2949 if (props) { 2950 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2951 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2952 props, vers, flags, msg)) == NULL) 2953 return (-1); 2954 } 2955 2956 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2957 &children) != 0) { 2958 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2959 "Source pool is missing vdev tree")); 2960 if (zc_props) 2961 nvlist_free(zc_props); 2962 return (-1); 2963 } 2964 2965 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2966 vcount = 0; 2967 2968 if (*newroot == NULL || 2969 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2970 &newchild, &newchildren) != 0) 2971 newchildren = 0; 2972 2973 for (c = 0; c < children; c++) { 2974 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2975 char *type; 2976 nvlist_t **mchild, *vdev; 2977 uint_t mchildren; 2978 int entry; 2979 2980 /* 2981 * Unlike cache & spares, slogs are stored in the 2982 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2983 */ 2984 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2985 &is_log); 2986 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2987 &is_hole); 2988 if (is_log || is_hole) { 2989 /* 2990 * Create a hole vdev and put it in the config. 2991 */ 2992 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2993 goto out; 2994 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2995 VDEV_TYPE_HOLE) != 0) 2996 goto out; 2997 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2998 1) != 0) 2999 goto out; 3000 if (lastlog == 0) 3001 lastlog = vcount; 3002 varray[vcount++] = vdev; 3003 continue; 3004 } 3005 lastlog = 0; 3006 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 3007 == 0); 3008 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 3009 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3010 "Source pool must be composed only of mirrors\n")); 3011 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3012 goto out; 3013 } 3014 3015 verify(nvlist_lookup_nvlist_array(child[c], 3016 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 3017 3018 /* find or add an entry for this top-level vdev */ 3019 if (newchildren > 0 && 3020 (entry = find_vdev_entry(zhp, mchild, mchildren, 3021 newchild, newchildren)) >= 0) { 3022 /* We found a disk that the user specified. */ 3023 vdev = mchild[entry]; 3024 ++found; 3025 } else { 3026 /* User didn't specify a disk for this vdev. 
*/ 3027 vdev = mchild[mchildren - 1]; 3028 } 3029 3030 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 3031 goto out; 3032 } 3033 3034 /* did we find every disk the user specified? */ 3035 if (found != newchildren) { 3036 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 3037 "include at most one disk from each mirror")); 3038 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 3039 goto out; 3040 } 3041 3042 /* Prepare the nvlist for populating. */ 3043 if (*newroot == NULL) { 3044 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3045 goto out; 3046 freelist = B_TRUE; 3047 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3048 VDEV_TYPE_ROOT) != 0) 3049 goto out; 3050 } else { 3051 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3052 } 3053 3054 /* Add all the children we found */ 3055 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3056 lastlog == 0 ? vcount : lastlog) != 0) 3057 goto out; 3058 3059 /* 3060 * If we're just doing a dry run, exit now with success. 3061 */ 3062 if (flags.dryrun) { 3063 memory_err = B_FALSE; 3064 freelist = B_FALSE; 3065 goto out; 3066 } 3067 3068 /* now build up the config list & call the ioctl */ 3069 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3070 goto out; 3071 3072 if (nvlist_add_nvlist(newconfig, 3073 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3074 nvlist_add_string(newconfig, 3075 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3076 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3077 goto out; 3078 3079 /* 3080 * The new pool is automatically part of the namespace unless we 3081 * explicitly export it. 3082 */ 3083 if (!flags.import) 3084 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3085 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3086 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3087 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3088 goto out; 3089 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3090 goto out; 3091 3092 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3093 retval = zpool_standard_error(hdl, errno, msg); 3094 goto out; 3095 } 3096 3097 freelist = B_FALSE; 3098 memory_err = B_FALSE; 3099 3100out: 3101 if (varray != NULL) { 3102 int v; 3103 3104 for (v = 0; v < vcount; v++) 3105 nvlist_free(varray[v]); 3106 free(varray); 3107 } 3108 zcmd_free_nvlists(&zc); 3109 if (zc_props) 3110 nvlist_free(zc_props); 3111 if (newconfig) 3112 nvlist_free(newconfig); 3113 if (freelist) { 3114 nvlist_free(*newroot); 3115 *newroot = NULL; 3116 } 3117 3118 if (retval != 0) 3119 return (retval); 3120 3121 if (memory_err) 3122 return (no_memory(hdl)); 3123 3124 return (0); 3125} 3126 3127/* 3128 * Remove the given device. Currently, this is supported only for hot spares 3129 * and level 2 cache devices. 3130 */ 3131int 3132zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3133{ 3134 zfs_cmd_t zc = { 0 }; 3135 char msg[1024]; 3136 nvlist_t *tgt; 3137 boolean_t avail_spare, l2cache, islog; 3138 libzfs_handle_t *hdl = zhp->zpool_hdl; 3139 uint64_t version; 3140 3141 (void) snprintf(msg, sizeof (msg), 3142 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3143 3144 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3145 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3146 &islog)) == 0) 3147 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3148 /* 3149 * XXX - this should just go away. 
	 */
	if (!avail_spare && !l2cache && !islog) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "only inactive hot spares, cache, top-level, "
		    "or log devices can be removed"));
		return (zfs_error(hdl, EZFS_NODEVICE, msg));
	}

	version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
	if (islog && version < SPA_VERSION_HOLES) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "pool must be upgraded to support log removal"));
		return (zfs_error(hdl, EZFS_BADVERSION, msg));
	}

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Clear the errors for the pool, or the particular device if specified.
 */
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	zpool_rewind_policy_t policy;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	nvlist_t *nvi = NULL;
	int error;

	if (path)
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    path);
	else
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
		    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (path) {
		if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
		    &l2cache, NULL)) == NULL)
			return (zfs_error(hdl, EZFS_NODEVICE, msg));

		/*
		 * Don't allow error clearing for hot spares.  Do allow
		 * error clearing for l2cache devices.
		 */
		if (avail_spare)
			return (zfs_error(hdl, EZFS_ISSPARE, msg));

		verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
		    &zc.zc_guid) == 0);
	}

	zpool_get_rewind_policy(rewindnvl, &policy);
	zc.zc_cookie = policy.zrp_request;

	if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
		return (-1);

	if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
		return (-1);

	while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
	    errno == ENOMEM) {
		if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
			zcmd_free_nvlists(&zc);
			return (-1);
		}
	}

	if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
	    errno != EPERM && errno != EACCES)) {
		if (policy.zrp_request &
		    (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
			(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
			zpool_rewind_exclaim(hdl, zc.zc_name,
			    ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
			    nvi);
			nvlist_free(nvi);
		}
		zcmd_free_nvlists(&zc);
		return (0);
	}

	zcmd_free_nvlists(&zc);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Similar to zpool_clear(), but takes a GUID (used by fmd).
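 *
 * A hedged usage sketch (editor's illustration; 'vdev_guid' would come
 * from the fault event, and 'g_zfs' is a hypothetical library handle):
 *
 *	zpool_handle_t *zhp;
 *
 *	if ((zhp = zpool_open_canfail(g_zfs, pool_name)) != NULL) {
 *		(void) zpool_vdev_clear(zhp, vdev_guid);
 *		zpool_close(zhp);
 *	}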
 */
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
	    (u_longlong_t)guid);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_guid = guid;
	zc.zc_cookie = ZPOOL_NO_REWIND;

	if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Change the GUID for a pool.
 */
int
zpool_reguid(zpool_handle_t *zhp)
{
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zfs_cmd_t zc = { 0 };

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
		return (0);

	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Reopen the pool.
 */
int
zpool_reopen(zpool_handle_t *zhp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
	    zhp->zpool_name);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
		return (0);
	return (zpool_standard_error(hdl, errno, msg));
}

/*
 * Convert from a devid string to a path.
 */
static char *
devid_to_path(char *devid_str)
{
	ddi_devid_t devid;
	char *minor;
	char *path;
	devid_nmlist_t *list = NULL;
	int ret;

	if (devid_str_decode(devid_str, &devid, &minor) != 0)
		return (NULL);

	ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);

	devid_str_free(minor);
	devid_free(devid);

	if (ret != 0)
		return (NULL);

	/*
	 * In case the strdup() fails, we will just return NULL below.
	 */
	path = strdup(list[0].devname);

	devid_free_nmlist(list);

	return (path);
}

/*
 * Convert from a path to a devid string.
 */
static char *
path_to_devid(const char *path)
{
#ifdef have_devid
	int fd;
	ddi_devid_t devid;
	char *minor, *ret;

	if ((fd = open(path, O_RDONLY)) < 0)
		return (NULL);

	minor = NULL;
	ret = NULL;
	if (devid_get(fd, &devid) == 0) {
		if (devid_get_minor_name(fd, &minor) == 0)
			ret = devid_str_encode(devid, minor);
		if (minor != NULL)
			devid_str_free(minor);
		devid_free(devid);
	}
	(void) close(fd);

	return (ret);
#else
	return (NULL);
#endif
}

/*
 * Issue the necessary ioctl() to update the stored path value for the vdev.  We
 * ignore any failure here, since a common case is for an unprivileged user to
 * type 'zpool status', and we'll display the correct information anyway.
 */
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
	zfs_cmd_t zc = { 0 };

	(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
	verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
	    &zc.zc_guid) == 0);

	(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}

/*
 * Given a vdev, return the name to display in iostat.  If the vdev has a path,
 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
 * We also check if this is a whole disk, in which case we strip off the
 * trailing 's0' slice name.
 *
 * This routine is also responsible for identifying when disks have been
 * reconfigured in a new location.  The kernel will have opened the device by
 * devid, but the path will still refer to the old location.  To catch this, we
 * first do a path -> devid translation (which is fast for the common case).  If
 * the devid matches, we're done.  If not, we do a reverse devid -> path
 * translation and issue the appropriate ioctl() to update the path of the vdev.
 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
 * of these checks.
 */
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
    boolean_t verbose)
{
	char *path, *devid;
	uint64_t value;
	char buf[64];
	vdev_stat_t *vs;
	uint_t vsc;
	int have_stats;
	int have_path;

	have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &vsc) == 0;
	have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;

	/*
	 * If the device is not currently present, assume it will not
	 * come back at the same device path.  Display the device by GUID.
	 */
	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
	    (have_path && have_stats &&
	    vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
		    &value) == 0);
		(void) snprintf(buf, sizeof (buf), "%llu",
		    (u_longlong_t)value);
		path = buf;
	} else if (have_path) {

		/*
		 * If the device is dead (faulted, offline, etc) then don't
		 * bother opening it.  Otherwise we may be forcing the user to
		 * open a misbehaving device, which can have undesirable
		 * effects.
		 */
		if ((have_stats == 0 ||
		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
		    zhp != NULL &&
		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
			/*
			 * Determine if the current path is correct.
			 */
			char *newdevid = path_to_devid(path);

			if (newdevid == NULL ||
			    strcmp(devid, newdevid) != 0) {
				char *newpath;

				if ((newpath = devid_to_path(devid)) != NULL) {
					/*
					 * Update the path appropriately.
3461 */ 3462 set_path(zhp, nv, newpath); 3463 if (nvlist_add_string(nv, 3464 ZPOOL_CONFIG_PATH, newpath) == 0) 3465 verify(nvlist_lookup_string(nv, 3466 ZPOOL_CONFIG_PATH, 3467 &path) == 0); 3468 free(newpath); 3469 } 3470 } 3471 3472 if (newdevid) 3473 devid_str_free(newdevid); 3474 } 3475 3476#ifdef sun 3477 if (strncmp(path, "/dev/dsk/", 9) == 0) 3478 path += 9; 3479 3480 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3481 &value) == 0 && value) { 3482 int pathlen = strlen(path); 3483 char *tmp = zfs_strdup(hdl, path); 3484 3485 /* 3486 * If it starts with c#, and ends with "s0", chop 3487 * the "s0" off, or if it ends with "s0/old", remove 3488 * the "s0" from the middle. 3489 */ 3490 if (CTD_CHECK(tmp)) { 3491 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3492 tmp[pathlen - 2] = '\0'; 3493 } else if (pathlen > 6 && 3494 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3495 (void) strcpy(&tmp[pathlen - 6], 3496 "/old"); 3497 } 3498 } 3499 return (tmp); 3500 } 3501#else /* !sun */ 3502 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3503 path += sizeof(_PATH_DEV) - 1; 3504#endif /* !sun */ 3505 } else { 3506 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3507 3508 /* 3509 * If it's a raidz device, we need to stick in the parity level. 3510 */ 3511 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3512 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3513 &value) == 0); 3514 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3515 (u_longlong_t)value); 3516 path = buf; 3517 } 3518 3519 /* 3520 * We identify each top-level vdev by using a <type-id> 3521 * naming convention. 3522 */ 3523 if (verbose) { 3524 uint64_t id; 3525 3526 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3527 &id) == 0); 3528 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3529 (u_longlong_t)id); 3530 path = buf; 3531 } 3532 } 3533 3534 return (zfs_strdup(hdl, path)); 3535} 3536 3537static int 3538zbookmark_compare(const void *a, const void *b) 3539{ 3540 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3541} 3542 3543/* 3544 * Retrieve the persistent error log, uniquify the members, and return to the 3545 * caller. 3546 */ 3547int 3548zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3549{ 3550 zfs_cmd_t zc = { 0 }; 3551 uint64_t count; 3552 zbookmark_phys_t *zb = NULL; 3553 int i; 3554 3555 /* 3556 * Retrieve the raw error list from the kernel. If the number of errors 3557 * has increased, allocate more space and continue until we get the 3558 * entire list. 3559 */ 3560 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3561 &count) == 0); 3562 if (count == 0) 3563 return (0); 3564 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3565 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3566 return (-1); 3567 zc.zc_nvlist_dst_size = count; 3568 (void) strcpy(zc.zc_name, zhp->zpool_name); 3569 for (;;) { 3570 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3571 &zc) != 0) { 3572 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3573 if (errno == ENOMEM) { 3574 void *dst; 3575 3576 count = zc.zc_nvlist_dst_size; 3577 dst = zfs_alloc(zhp->zpool_hdl, count * 3578 sizeof (zbookmark_phys_t)); 3579 if (dst == NULL) 3580 return (-1); 3581 zc.zc_nvlist_dst = (uintptr_t)dst; 3582 } else { 3583 return (-1); 3584 } 3585 } else { 3586 break; 3587 } 3588 } 3589 3590 /* 3591 * Sort the resulting bookmarks. This is a little confusing due to the 3592 * implementation of ZFS_IOC_ERROR_LOG. 
The bookmarks are copied last
	 * to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
	 * _not_ copied as part of the process.  So we point the start of our
	 * array appropriately and decrement the total number of elements.
	 */
	zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
	    zc.zc_nvlist_dst_size;
	count -= zc.zc_nvlist_dst_size;

	qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_compare);

	verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);

	/*
	 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
	 */
	for (i = 0; i < count; i++) {
		nvlist_t *nv;

		/* ignoring zb_blkid and zb_level for now */
		if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
		    zb[i-1].zb_object == zb[i].zb_object)
			continue;

		if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
			goto nomem;
		if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
		    zb[i].zb_objset) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
		    zb[i].zb_object) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
			nvlist_free(nv);
			goto nomem;
		}
		nvlist_free(nv);
	}

	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (0);

nomem:
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (no_memory(zhp->zpool_hdl));
}

/*
 * Upgrade a ZFS pool to the latest on-disk version.
 */
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
	zfs_cmd_t zc = { 0 };
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strcpy(zc.zc_name, zhp->zpool_name);
	zc.zc_cookie = new_version;

	if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
		return (zpool_standard_error_fmt(hdl, errno,
		    dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
		    zhp->zpool_name));
	return (0);
}

void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	(void) strlcpy(string, basename(argv[0]), len);
	for (int i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}

int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_string(args, "message", message);
	err = zcmd_write_src_nvlist(hdl, &zc, args);
	if (err == 0)
		err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
	nvlist_free(args);
	zcmd_free_nvlists(&zc);
	return (err);
}

/*
 * Perform ioctl to get some command history of a pool.
 *
 * 'buf' is the buffer to fill up to 'len' bytes.  'off' is the
 * logical offset of the history buffer to start reading from.
 *
 * Upon return, 'off' is the next logical offset to read from and
 * 'len' is the actual amount of bytes read into 'buf'.
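 *
 * Editor's sketch of the intended calling pattern (illustrative only; this
 * mirrors the loop in zpool_get_history() below):
 *
 *	uint64_t off = 0, len;
 *
 *	for (;;) {
 *		len = bufsize;
 *		if (get_history(zhp, buf, &off, &len) != 0 || len == 0)
 *			break;
 *		...consume 'len' bytes of packed records from 'buf'...
 *	}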
3697 */ 3698static int 3699get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3700{ 3701 zfs_cmd_t zc = { 0 }; 3702 libzfs_handle_t *hdl = zhp->zpool_hdl; 3703 3704 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3705 3706 zc.zc_history = (uint64_t)(uintptr_t)buf; 3707 zc.zc_history_len = *len; 3708 zc.zc_history_offset = *off; 3709 3710 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3711 switch (errno) { 3712 case EPERM: 3713 return (zfs_error_fmt(hdl, EZFS_PERM, 3714 dgettext(TEXT_DOMAIN, 3715 "cannot show history for pool '%s'"), 3716 zhp->zpool_name)); 3717 case ENOENT: 3718 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3719 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3720 "'%s'"), zhp->zpool_name)); 3721 case ENOTSUP: 3722 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3723 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3724 "'%s', pool must be upgraded"), zhp->zpool_name)); 3725 default: 3726 return (zpool_standard_error_fmt(hdl, errno, 3727 dgettext(TEXT_DOMAIN, 3728 "cannot get history for '%s'"), zhp->zpool_name)); 3729 } 3730 } 3731 3732 *len = zc.zc_history_len; 3733 *off = zc.zc_history_offset; 3734 3735 return (0); 3736} 3737 3738/* 3739 * Process the buffer of nvlists, unpacking and storing each nvlist record 3740 * into 'records'. 'leftover' is set to the number of bytes that weren't 3741 * processed as there wasn't a complete record. 3742 */ 3743int 3744zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3745 nvlist_t ***records, uint_t *numrecords) 3746{ 3747 uint64_t reclen; 3748 nvlist_t *nv; 3749 int i; 3750 3751 while (bytes_read > sizeof (reclen)) { 3752 3753 /* get length of packed record (stored as little endian) */ 3754 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3755 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3756 3757 if (bytes_read < sizeof (reclen) + reclen) 3758 break; 3759 3760 /* unpack record */ 3761 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3762 return (ENOMEM); 3763 bytes_read -= sizeof (reclen) + reclen; 3764 buf += sizeof (reclen) + reclen; 3765 3766 /* add record to nvlist array */ 3767 (*numrecords)++; 3768 if (ISP2(*numrecords + 1)) { 3769 *records = realloc(*records, 3770 *numrecords * 2 * sizeof (nvlist_t *)); 3771 } 3772 (*records)[*numrecords - 1] = nv; 3773 } 3774 3775 *leftover = bytes_read; 3776 return (0); 3777} 3778 3779/* from spa_history.c: spa_history_create_obj() */ 3780#define HIS_BUF_LEN_DEF (128 << 10) 3781#define HIS_BUF_LEN_MAX (1 << 30) 3782 3783/* 3784 * Retrieve the command history of a pool. 3785 */ 3786int 3787zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3788{ 3789 char *buf = NULL; 3790 uint64_t bufsize = HIS_BUF_LEN_DEF; 3791 uint64_t off = 0; 3792 nvlist_t **records = NULL; 3793 uint_t numrecords = 0; 3794 int err, i; 3795 3796 if ((buf = malloc(bufsize)) == NULL) 3797 return (ENOMEM); 3798 do { 3799 uint64_t bytes_read = bufsize; 3800 uint64_t leftover; 3801 3802 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3803 break; 3804 3805 /* if nothing else was read in, we're at EOF, just return */ 3806 if (bytes_read == 0) 3807 break; 3808 3809 if ((err = zpool_history_unpack(buf, bytes_read, 3810 &leftover, &records, &numrecords)) != 0) 3811 break; 3812 off -= leftover; 3813 3814 /* 3815 * If the history block is too big, double the buffer 3816 * size and try again. 
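		 *
		 * (Editor's example: starting from HIS_BUF_LEN_DEF (128KB),
		 * a single record larger than the buffer forces doublings
		 * to 256KB, 512KB, and so on; once the size reaches
		 * HIS_BUF_LEN_MAX (1GB) we give up and return ENOMEM
		 * rather than grow further.)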
3817 */ 3818 if (leftover == bytes_read) { 3819 free(buf); 3820 buf = NULL; 3821 3822 bufsize <<= 1; 3823 if ((bufsize >= HIS_BUF_LEN_MAX) || 3824 ((buf = malloc(bufsize)) == NULL)) { 3825 err = ENOMEM; 3826 break; 3827 } 3828 } 3829 3830 /* CONSTCOND */ 3831 } while (1); 3832 free(buf); 3833 3834 if (!err) { 3835 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3836 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3837 records, numrecords) == 0); 3838 } 3839 for (i = 0; i < numrecords; i++) 3840 nvlist_free(records[i]); 3841 free(records); 3842 3843 return (err); 3844} 3845 3846void 3847zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3848 char *pathname, size_t len) 3849{ 3850 zfs_cmd_t zc = { 0 }; 3851 boolean_t mounted = B_FALSE; 3852 char *mntpnt = NULL; 3853 char dsname[MAXNAMELEN]; 3854 3855 if (dsobj == 0) { 3856 /* special case for the MOS */ 3857 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3858 return; 3859 } 3860 3861 /* get the dataset's name */ 3862 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3863 zc.zc_obj = dsobj; 3864 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3865 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3866 /* just write out a path of two object numbers */ 3867 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3868 dsobj, obj); 3869 return; 3870 } 3871 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3872 3873 /* find out if the dataset is mounted */ 3874 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3875 3876 /* get the corrupted object's path */ 3877 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3878 zc.zc_obj = obj; 3879 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3880 &zc) == 0) { 3881 if (mounted) { 3882 (void) snprintf(pathname, len, "%s%s", mntpnt, 3883 zc.zc_value); 3884 } else { 3885 (void) snprintf(pathname, len, "%s:%s", 3886 dsname, zc.zc_value); 3887 } 3888 } else { 3889 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3890 } 3891 free(mntpnt); 3892} 3893 3894#ifdef sun 3895/* 3896 * Read the EFI label from the config, if a label does not exist then 3897 * pass back the error to the caller. If the caller has passed a non-NULL 3898 * diskaddr argument then we set it to the starting address of the EFI 3899 * partition. 
3900 */ 3901static int 3902read_efi_label(nvlist_t *config, diskaddr_t *sb) 3903{ 3904 char *path; 3905 int fd; 3906 char diskname[MAXPATHLEN]; 3907 int err = -1; 3908 3909 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3910 return (err); 3911 3912 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT, 3913 strrchr(path, '/')); 3914 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3915 struct dk_gpt *vtoc; 3916 3917 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3918 if (sb != NULL) 3919 *sb = vtoc->efi_parts[0].p_start; 3920 efi_free(vtoc); 3921 } 3922 (void) close(fd); 3923 } 3924 return (err); 3925} 3926 3927/* 3928 * determine where a partition starts on a disk in the current 3929 * configuration 3930 */ 3931static diskaddr_t 3932find_start_block(nvlist_t *config) 3933{ 3934 nvlist_t **child; 3935 uint_t c, children; 3936 diskaddr_t sb = MAXOFFSET_T; 3937 uint64_t wholedisk; 3938 3939 if (nvlist_lookup_nvlist_array(config, 3940 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3941 if (nvlist_lookup_uint64(config, 3942 ZPOOL_CONFIG_WHOLE_DISK, 3943 &wholedisk) != 0 || !wholedisk) { 3944 return (MAXOFFSET_T); 3945 } 3946 if (read_efi_label(config, &sb) < 0) 3947 sb = MAXOFFSET_T; 3948 return (sb); 3949 } 3950 3951 for (c = 0; c < children; c++) { 3952 sb = find_start_block(child[c]); 3953 if (sb != MAXOFFSET_T) { 3954 return (sb); 3955 } 3956 } 3957 return (MAXOFFSET_T); 3958} 3959#endif /* sun */ 3960 3961/* 3962 * Label an individual disk. The name provided is the short name, 3963 * stripped of any leading /dev path. 3964 */ 3965int 3966zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3967{ 3968#ifdef sun 3969 char path[MAXPATHLEN]; 3970 struct dk_gpt *vtoc; 3971 int fd; 3972 size_t resv = EFI_MIN_RESV_SIZE; 3973 uint64_t slice_size; 3974 diskaddr_t start_block; 3975 char errbuf[1024]; 3976 3977 /* prepare an error message just in case */ 3978 (void) snprintf(errbuf, sizeof (errbuf), 3979 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3980 3981 if (zhp) { 3982 nvlist_t *nvroot; 3983 3984 if (zpool_is_bootable(zhp)) { 3985 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3986 "EFI labeled devices are not supported on root " 3987 "pools.")); 3988 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf)); 3989 } 3990 3991 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3992 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3993 3994 if (zhp->zpool_start_block == 0) 3995 start_block = find_start_block(nvroot); 3996 else 3997 start_block = zhp->zpool_start_block; 3998 zhp->zpool_start_block = start_block; 3999 } else { 4000 /* new pool */ 4001 start_block = NEW_START_BLOCK; 4002 } 4003 4004 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name, 4005 BACKUP_SLICE); 4006 4007 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 4008 /* 4009 * This shouldn't happen. We've long since verified that this 4010 * is a valid device. 
		 */
		zfs_error_aux(hdl,
		    dgettext(TEXT_DOMAIN, "unable to open device"));
		return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
	}

	if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
		/*
		 * The only way this can fail is if we run out of memory, or we
		 * were unable to read the disk's capacity
		 */
		if (errno == ENOMEM)
			(void) no_memory(hdl);

		(void) close(fd);
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "unable to read disk capacity"));

		return (zfs_error(hdl, EZFS_NOCAP, errbuf));
	}

	slice_size = vtoc->efi_last_u_lba + 1;
	slice_size -= EFI_MIN_RESV_SIZE;
	if (start_block == MAXOFFSET_T)
		start_block = NEW_START_BLOCK;
	slice_size -= start_block;

	vtoc->efi_parts[0].p_start = start_block;
	vtoc->efi_parts[0].p_size = slice_size;

	/*
	 * Why we use V_USR: V_BACKUP confuses users, and is considered
	 * disposable by some EFI utilities (since EFI doesn't have a backup
	 * slice).  V_UNASSIGNED is supposed to be used only for zero size
	 * partitions, and efi_write() will fail if we use it.  V_ROOT, V_BOOT,
	 * etc. were all pretty specific.  V_USR is as close to reality as we
	 * can get, in the absence of V_OTHER.
	 */
	vtoc->efi_parts[0].p_tag = V_USR;
	(void) strcpy(vtoc->efi_parts[0].p_name, "zfs");

	vtoc->efi_parts[8].p_start = slice_size + start_block;
	vtoc->efi_parts[8].p_size = resv;
	vtoc->efi_parts[8].p_tag = V_RESERVED;

	if (efi_write(fd, vtoc) != 0) {
		/*
		 * Some block drivers (like pcata) may not support EFI
		 * GPT labels.  Print out a helpful error message directing
		 * the user to manually label the disk and give a specific
		 * slice.
		 */
		(void) close(fd);
		efi_free(vtoc);

		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "try using fdisk(1M) and then provide a specific slice"));
		return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
	}

	(void) close(fd);
	efi_free(vtoc);
#endif /* sun */
	return (0);
}

static boolean_t
supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
{
	char *type;
	nvlist_t **child;
	uint_t children, c;

	verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
	if (strcmp(type, VDEV_TYPE_FILE) == 0 ||
	    strcmp(type, VDEV_TYPE_HOLE) == 0 ||
	    strcmp(type, VDEV_TYPE_MISSING) == 0) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "vdev type '%s' is not supported"), type);
		(void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
		return (B_FALSE);
	}
	if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++) {
			if (!supported_dump_vdev_type(hdl, child[c], errbuf))
				return (B_FALSE);
		}
	}
	return (B_TRUE);
}

/*
 * Check if this zvol is allowable for use as a dump device; zero if
 * it is, > 0 if it isn't, < 0 if it isn't a zvol.
 *
 * Allowable storage configurations include mirrors, all raidz variants, and
 * pools with log, cache, and spare devices.  Pools which are backed by files or
 * have missing/hole vdevs are not suitable.
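 *
 * A hedged usage sketch (editor's illustration; the caller and the zvol
 * path are hypothetical):
 *
 *	if (zvol_check_dump_config("/dev/zvol/dsk/rpool/dump") == 0)
 *		...the zvol's pool layout can back a dump device...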
4110 */ 4111int 4112zvol_check_dump_config(char *arg) 4113{ 4114 zpool_handle_t *zhp = NULL; 4115 nvlist_t *config, *nvroot; 4116 char *p, *volname; 4117 nvlist_t **top; 4118 uint_t toplevels; 4119 libzfs_handle_t *hdl; 4120 char errbuf[1024]; 4121 char poolname[ZPOOL_MAXNAMELEN]; 4122 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4123 int ret = 1; 4124 4125 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4126 return (-1); 4127 } 4128 4129 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4130 "dump is not supported on device '%s'"), arg); 4131 4132 if ((hdl = libzfs_init()) == NULL) 4133 return (1); 4134 libzfs_print_on_error(hdl, B_TRUE); 4135 4136 volname = arg + pathlen; 4137 4138 /* check the configuration of the pool */ 4139 if ((p = strchr(volname, '/')) == NULL) { 4140 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4141 "malformed dataset name")); 4142 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4143 return (1); 4144 } else if (p - volname >= ZFS_MAXNAMELEN) { 4145 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4146 "dataset name is too long")); 4147 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4148 return (1); 4149 } else { 4150 (void) strncpy(poolname, volname, p - volname); 4151 poolname[p - volname] = '\0'; 4152 } 4153 4154 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4155 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4156 "could not open pool '%s'"), poolname); 4157 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4158 goto out; 4159 } 4160 config = zpool_get_config(zhp, NULL); 4161 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4162 &nvroot) != 0) { 4163 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4164 "could not obtain vdev configuration for '%s'"), poolname); 4165 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4166 goto out; 4167 } 4168 4169 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4170 &top, &toplevels) == 0); 4171 4172 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4173 goto out; 4174 } 4175 ret = 0; 4176 4177out: 4178 if (zhp) 4179 zpool_close(zhp); 4180 libzfs_fini(hdl); 4181 return (ret); 4182} 4183
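/*
 * Editor's appendix: a minimal, self-contained sketch (not part of the
 * original file) showing how the history API above is typically consumed.
 * The helper name and the "tank" pool are hypothetical; only libzfs
 * interfaces that appear in this file are assumed.
 */
#if 0
static void
example_print_history_count(libzfs_handle_t *g_zfs)
{
	zpool_handle_t *zhp;
	nvlist_t *nvhis;
	nvlist_t **records;
	uint_t nrecords;

	/* "tank" is a placeholder pool name */
	if ((zhp = zpool_open(g_zfs, "tank")) == NULL)
		return;
	if (zpool_get_history(zhp, &nvhis) == 0) {
		if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
		    &records, &nrecords) == 0)
			(void) printf("%u history records\n", nrecords);
		nvlist_free(nvhis);
	}
	zpool_close(zhp);
}
#endif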