libzfs_pool.c revision 325150
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22/* 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 25 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 26 * Copyright 2016 Nexenta Systems, Inc. 27 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com> 28 */ 29 30#include <sys/types.h> 31#include <sys/stat.h> 32#include <ctype.h> 33#include <errno.h> 34#include <devid.h> 35#include <fcntl.h> 36#include <libintl.h> 37#include <stdio.h> 38#include <stdlib.h> 39#include <strings.h> 40#include <unistd.h> 41#include <libgen.h> 42#include <sys/zfs_ioctl.h> 43#include <dlfcn.h> 44 45#include "zfs_namecheck.h" 46#include "zfs_prop.h" 47#include "libzfs_impl.h" 48#include "zfs_comutil.h" 49#include "zfeature_common.h" 50 51static int read_efi_label(nvlist_t *config, diskaddr_t *sb); 52 53#define BACKUP_SLICE "s2" 54 55typedef struct prop_flags { 56 int create:1; /* Validate property on creation */ 57 int import:1; /* Validate property on import */ 58} prop_flags_t; 59 60/* 61 * ==================================================================== 62 * zpool property functions 63 * ==================================================================== 64 */ 65 66static int 67zpool_get_all_props(zpool_handle_t *zhp) 68{ 69 zfs_cmd_t zc = { 0 }; 70 libzfs_handle_t *hdl = zhp->zpool_hdl; 71 72 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 73 74 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) 75 return (-1); 76 77 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) { 78 if (errno == ENOMEM) { 79 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 80 zcmd_free_nvlists(&zc); 81 return (-1); 82 } 83 } else { 84 zcmd_free_nvlists(&zc); 85 return (-1); 86 } 87 } 88 89 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) { 90 zcmd_free_nvlists(&zc); 91 return (-1); 92 } 93 94 zcmd_free_nvlists(&zc); 95 96 return (0); 97} 98 99static int 100zpool_props_refresh(zpool_handle_t *zhp) 101{ 102 nvlist_t *old_props; 103 104 old_props = zhp->zpool_props; 105 106 if (zpool_get_all_props(zhp) != 0) 107 return (-1); 108 109 nvlist_free(old_props); 110 return (0); 111} 112 113static char * 114zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop, 115 zprop_source_t *src) 116{ 117 nvlist_t *nv, *nvl; 118 uint64_t ival; 119 char *value; 120 zprop_source_t source; 121 122 nvl = zhp->zpool_props; 123 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 124 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0); 125 source = ival; 126 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0); 127 } else { 128 source = ZPROP_SRC_DEFAULT; 129 if ((value = (char *)zpool_prop_default_string(prop)) == NULL) 130 value = "-"; 131 } 132 133 if (src) 134 *src = source; 135 136 return (value); 137} 138 139uint64_t 140zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src) 141{ 142 nvlist_t *nv, *nvl; 143 uint64_t value; 144 zprop_source_t source; 145 146 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) { 147 /* 148 * zpool_get_all_props() has most likely failed because 149 * the pool is faulted, but if all we need is the top level 150 * vdev's guid then get it from the zhp config nvlist. 151 */ 152 if ((prop == ZPOOL_PROP_GUID) && 153 (nvlist_lookup_nvlist(zhp->zpool_config, 154 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) && 155 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) 156 == 0)) { 157 return (value); 158 } 159 return (zpool_prop_default_numeric(prop)); 160 } 161 162 nvl = zhp->zpool_props; 163 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) { 164 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0); 165 source = value; 166 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0); 167 } else { 168 source = ZPROP_SRC_DEFAULT; 169 value = zpool_prop_default_numeric(prop); 170 } 171 172 if (src) 173 *src = source; 174 175 return (value); 176} 177 178/* 179 * Map VDEV STATE to printed strings. 180 */ 181const char * 182zpool_state_to_name(vdev_state_t state, vdev_aux_t aux) 183{ 184 switch (state) { 185 case VDEV_STATE_CLOSED: 186 case VDEV_STATE_OFFLINE: 187 return (gettext("OFFLINE")); 188 case VDEV_STATE_REMOVED: 189 return (gettext("REMOVED")); 190 case VDEV_STATE_CANT_OPEN: 191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG) 192 return (gettext("FAULTED")); 193 else if (aux == VDEV_AUX_SPLIT_POOL) 194 return (gettext("SPLIT")); 195 else 196 return (gettext("UNAVAIL")); 197 case VDEV_STATE_FAULTED: 198 return (gettext("FAULTED")); 199 case VDEV_STATE_DEGRADED: 200 return (gettext("DEGRADED")); 201 case VDEV_STATE_HEALTHY: 202 return (gettext("ONLINE")); 203 204 default: 205 break; 206 } 207 208 return (gettext("UNKNOWN")); 209} 210 211/* 212 * Map POOL STATE to printed strings. 213 */ 214const char * 215zpool_pool_state_to_name(pool_state_t state) 216{ 217 switch (state) { 218 case POOL_STATE_ACTIVE: 219 return (gettext("ACTIVE")); 220 case POOL_STATE_EXPORTED: 221 return (gettext("EXPORTED")); 222 case POOL_STATE_DESTROYED: 223 return (gettext("DESTROYED")); 224 case POOL_STATE_SPARE: 225 return (gettext("SPARE")); 226 case POOL_STATE_L2CACHE: 227 return (gettext("L2CACHE")); 228 case POOL_STATE_UNINITIALIZED: 229 return (gettext("UNINITIALIZED")); 230 case POOL_STATE_UNAVAIL: 231 return (gettext("UNAVAIL")); 232 case POOL_STATE_POTENTIALLY_ACTIVE: 233 return (gettext("POTENTIALLY_ACTIVE")); 234 } 235 236 return (gettext("UNKNOWN")); 237} 238 239/* 240 * Get a zpool property value for 'prop' and return the value in 241 * a pre-allocated buffer. 242 */ 243int 244zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len, 245 zprop_source_t *srctype, boolean_t literal) 246{ 247 uint64_t intval; 248 const char *strval; 249 zprop_source_t src = ZPROP_SRC_NONE; 250 nvlist_t *nvroot; 251 vdev_stat_t *vs; 252 uint_t vsc; 253 254 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) { 255 switch (prop) { 256 case ZPOOL_PROP_NAME: 257 (void) strlcpy(buf, zpool_get_name(zhp), len); 258 break; 259 260 case ZPOOL_PROP_HEALTH: 261 (void) strlcpy(buf, 262 zpool_pool_state_to_name(POOL_STATE_UNAVAIL), len); 263 break; 264 265 case ZPOOL_PROP_GUID: 266 intval = zpool_get_prop_int(zhp, prop, &src); 267 (void) snprintf(buf, len, "%llu", intval); 268 break; 269 270 case ZPOOL_PROP_ALTROOT: 271 case ZPOOL_PROP_CACHEFILE: 272 case ZPOOL_PROP_COMMENT: 273 if (zhp->zpool_props != NULL || 274 zpool_get_all_props(zhp) == 0) { 275 (void) strlcpy(buf, 276 zpool_get_prop_string(zhp, prop, &src), 277 len); 278 break; 279 } 280 /* FALLTHROUGH */ 281 default: 282 (void) strlcpy(buf, "-", len); 283 break; 284 } 285 286 if (srctype != NULL) 287 *srctype = src; 288 return (0); 289 } 290 291 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) && 292 prop != ZPOOL_PROP_NAME) 293 return (-1); 294 295 switch (zpool_prop_get_type(prop)) { 296 case PROP_TYPE_STRING: 297 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src), 298 len); 299 break; 300 301 case PROP_TYPE_NUMBER: 302 intval = zpool_get_prop_int(zhp, prop, &src); 303 304 switch (prop) { 305 case ZPOOL_PROP_SIZE: 306 case ZPOOL_PROP_ALLOCATED: 307 case ZPOOL_PROP_FREE: 308 case ZPOOL_PROP_FREEING: 309 case ZPOOL_PROP_LEAKED: 310 if (literal) { 311 (void) snprintf(buf, len, "%llu", 312 (u_longlong_t)intval); 313 } else { 314 (void) zfs_nicenum(intval, buf, len); 315 } 316 break; 317 case ZPOOL_PROP_EXPANDSZ: 318 if (intval == 0) { 319 (void) strlcpy(buf, "-", len); 320 } else if (literal) { 321 (void) snprintf(buf, len, "%llu", 322 (u_longlong_t)intval); 323 } else { 324 (void) zfs_nicenum(intval, buf, len); 325 } 326 break; 327 case ZPOOL_PROP_CAPACITY: 328 if (literal) { 329 (void) snprintf(buf, len, "%llu", 330 (u_longlong_t)intval); 331 } else { 332 (void) snprintf(buf, len, "%llu%%", 333 (u_longlong_t)intval); 334 } 335 break; 336 case ZPOOL_PROP_FRAGMENTATION: 337 if (intval == UINT64_MAX) { 338 (void) strlcpy(buf, "-", len); 339 } else { 340 (void) snprintf(buf, len, "%llu%%", 341 (u_longlong_t)intval); 342 } 343 break; 344 case ZPOOL_PROP_DEDUPRATIO: 345 (void) snprintf(buf, len, "%llu.%02llux", 346 (u_longlong_t)(intval / 100), 347 (u_longlong_t)(intval % 100)); 348 break; 349 case ZPOOL_PROP_HEALTH: 350 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 351 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 352 verify(nvlist_lookup_uint64_array(nvroot, 353 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 354 == 0); 355 356 (void) strlcpy(buf, zpool_state_to_name(intval, 357 vs->vs_aux), len); 358 break; 359 case ZPOOL_PROP_VERSION: 360 if (intval >= SPA_VERSION_FEATURES) { 361 (void) snprintf(buf, len, "-"); 362 break; 363 } 364 /* FALLTHROUGH */ 365 default: 366 (void) snprintf(buf, len, "%llu", intval); 367 } 368 break; 369 370 case PROP_TYPE_INDEX: 371 intval = zpool_get_prop_int(zhp, prop, &src); 372 if (zpool_prop_index_to_string(prop, intval, &strval) 373 != 0) 374 return (-1); 375 (void) strlcpy(buf, strval, len); 376 break; 377 378 default: 379 abort(); 380 } 381 382 if (srctype) 383 *srctype = src; 384 385 return (0); 386} 387 388/* 389 * Check if the bootfs name has the same pool name as it is set to. 390 * Assuming bootfs is a valid dataset name. 391 */ 392static boolean_t 393bootfs_name_valid(const char *pool, char *bootfs) 394{ 395 int len = strlen(pool); 396 397 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT)) 398 return (B_FALSE); 399 400 if (strncmp(pool, bootfs, len) == 0 && 401 (bootfs[len] == '/' || bootfs[len] == '\0')) 402 return (B_TRUE); 403 404 return (B_FALSE); 405} 406 407boolean_t 408zpool_is_bootable(zpool_handle_t *zhp) 409{ 410 char bootfs[ZFS_MAX_DATASET_NAME_LEN]; 411 412 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs, 413 sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-", 414 sizeof (bootfs)) != 0); 415} 416 417 418/* 419 * Given an nvlist of zpool properties to be set, validate that they are 420 * correct, and parse any numeric properties (index, boolean, etc) if they are 421 * specified as strings. 422 */ 423static nvlist_t * 424zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname, 425 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf) 426{ 427 nvpair_t *elem; 428 nvlist_t *retprops; 429 zpool_prop_t prop; 430 char *strval; 431 uint64_t intval; 432 char *slash, *check; 433 struct stat64 statbuf; 434 zpool_handle_t *zhp; 435 436 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) { 437 (void) no_memory(hdl); 438 return (NULL); 439 } 440 441 elem = NULL; 442 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) { 443 const char *propname = nvpair_name(elem); 444 445 prop = zpool_name_to_prop(propname); 446 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) { 447 int err; 448 char *fname = strchr(propname, '@') + 1; 449 450 err = zfeature_lookup_name(fname, NULL); 451 if (err != 0) { 452 ASSERT3U(err, ==, ENOENT); 453 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 454 "invalid feature '%s'"), fname); 455 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 456 goto error; 457 } 458 459 if (nvpair_type(elem) != DATA_TYPE_STRING) { 460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 461 "'%s' must be a string"), propname); 462 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 463 goto error; 464 } 465 466 (void) nvpair_value_string(elem, &strval); 467 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) { 468 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 469 "property '%s' can only be set to " 470 "'enabled'"), propname); 471 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 472 goto error; 473 } 474 475 if (nvlist_add_uint64(retprops, propname, 0) != 0) { 476 (void) no_memory(hdl); 477 goto error; 478 } 479 continue; 480 } 481 482 /* 483 * Make sure this property is valid and applies to this type. 484 */ 485 if (prop == ZPROP_INVAL) { 486 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 487 "invalid property '%s'"), propname); 488 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 489 goto error; 490 } 491 492 if (zpool_prop_readonly(prop)) { 493 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 494 "is readonly"), propname); 495 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf); 496 goto error; 497 } 498 499 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops, 500 &strval, &intval, errbuf) != 0) 501 goto error; 502 503 /* 504 * Perform additional checking for specific properties. 505 */ 506 switch (prop) { 507 case ZPOOL_PROP_VERSION: 508 if (intval < version || 509 !SPA_VERSION_IS_SUPPORTED(intval)) { 510 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 511 "property '%s' number %d is invalid."), 512 propname, intval); 513 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 514 goto error; 515 } 516 break; 517 518 case ZPOOL_PROP_BOOTFS: 519 if (flags.create || flags.import) { 520 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 521 "property '%s' cannot be set at creation " 522 "or import time"), propname); 523 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 524 goto error; 525 } 526 527 if (version < SPA_VERSION_BOOTFS) { 528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 529 "pool must be upgraded to support " 530 "'%s' property"), propname); 531 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf); 532 goto error; 533 } 534 535 /* 536 * bootfs property value has to be a dataset name and 537 * the dataset has to be in the same pool as it sets to. 538 */ 539 if (strval[0] != '\0' && !bootfs_name_valid(poolname, 540 strval)) { 541 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' " 542 "is an invalid name"), strval); 543 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 544 goto error; 545 } 546 547 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) { 548 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 549 "could not open pool '%s'"), poolname); 550 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 551 goto error; 552 } 553 zpool_close(zhp); 554 break; 555 556 case ZPOOL_PROP_ALTROOT: 557 if (!flags.create && !flags.import) { 558 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 559 "property '%s' can only be set during pool " 560 "creation or import"), propname); 561 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 562 goto error; 563 } 564 565 if (strval[0] != '/') { 566 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 567 "bad alternate root '%s'"), strval); 568 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 569 goto error; 570 } 571 break; 572 573 case ZPOOL_PROP_CACHEFILE: 574 if (strval[0] == '\0') 575 break; 576 577 if (strcmp(strval, "none") == 0) 578 break; 579 580 if (strval[0] != '/') { 581 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 582 "property '%s' must be empty, an " 583 "absolute path, or 'none'"), propname); 584 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 585 goto error; 586 } 587 588 slash = strrchr(strval, '/'); 589 590 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 || 591 strcmp(slash, "/..") == 0) { 592 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 593 "'%s' is not a valid file"), strval); 594 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 595 goto error; 596 } 597 598 *slash = '\0'; 599 600 if (strval[0] != '\0' && 601 (stat64(strval, &statbuf) != 0 || 602 !S_ISDIR(statbuf.st_mode))) { 603 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 604 "'%s' is not a valid directory"), 605 strval); 606 (void) zfs_error(hdl, EZFS_BADPATH, errbuf); 607 goto error; 608 } 609 610 *slash = '/'; 611 break; 612 613 case ZPOOL_PROP_COMMENT: 614 for (check = strval; *check != '\0'; check++) { 615 if (!isprint(*check)) { 616 zfs_error_aux(hdl, 617 dgettext(TEXT_DOMAIN, 618 "comment may only have printable " 619 "characters")); 620 (void) zfs_error(hdl, EZFS_BADPROP, 621 errbuf); 622 goto error; 623 } 624 } 625 if (strlen(strval) > ZPROP_MAX_COMMENT) { 626 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 627 "comment must not exceed %d characters"), 628 ZPROP_MAX_COMMENT); 629 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 630 goto error; 631 } 632 break; 633 case ZPOOL_PROP_READONLY: 634 if (!flags.import) { 635 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 636 "property '%s' can only be set at " 637 "import time"), propname); 638 (void) zfs_error(hdl, EZFS_BADPROP, errbuf); 639 goto error; 640 } 641 break; 642 643 default: 644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 645 "property '%s'(%d) not defined"), propname, prop); 646 break; 647 } 648 } 649 650 return (retprops); 651error: 652 nvlist_free(retprops); 653 return (NULL); 654} 655 656/* 657 * Set zpool property : propname=propval. 658 */ 659int 660zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval) 661{ 662 zfs_cmd_t zc = { 0 }; 663 int ret = -1; 664 char errbuf[1024]; 665 nvlist_t *nvl = NULL; 666 nvlist_t *realprops; 667 uint64_t version; 668 prop_flags_t flags = { 0 }; 669 670 (void) snprintf(errbuf, sizeof (errbuf), 671 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"), 672 zhp->zpool_name); 673 674 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) 675 return (no_memory(zhp->zpool_hdl)); 676 677 if (nvlist_add_string(nvl, propname, propval) != 0) { 678 nvlist_free(nvl); 679 return (no_memory(zhp->zpool_hdl)); 680 } 681 682 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 683 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl, 684 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) { 685 nvlist_free(nvl); 686 return (-1); 687 } 688 689 nvlist_free(nvl); 690 nvl = realprops; 691 692 /* 693 * Execute the corresponding ioctl() to set this property. 694 */ 695 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 696 697 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) { 698 nvlist_free(nvl); 699 return (-1); 700 } 701 702 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc); 703 704 zcmd_free_nvlists(&zc); 705 nvlist_free(nvl); 706 707 if (ret) 708 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf); 709 else 710 (void) zpool_props_refresh(zhp); 711 712 return (ret); 713} 714 715int 716zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp) 717{ 718 libzfs_handle_t *hdl = zhp->zpool_hdl; 719 zprop_list_t *entry; 720 char buf[ZFS_MAXPROPLEN]; 721 nvlist_t *features = NULL; 722 zprop_list_t **last; 723 boolean_t firstexpand = (NULL == *plp); 724 725 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0) 726 return (-1); 727 728 last = plp; 729 while (*last != NULL) 730 last = &(*last)->pl_next; 731 732 if ((*plp)->pl_all) 733 features = zpool_get_features(zhp); 734 735 if ((*plp)->pl_all && firstexpand) { 736 for (int i = 0; i < SPA_FEATURES; i++) { 737 zprop_list_t *entry = zfs_alloc(hdl, 738 sizeof (zprop_list_t)); 739 entry->pl_prop = ZPROP_INVAL; 740 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s", 741 spa_feature_table[i].fi_uname); 742 entry->pl_width = strlen(entry->pl_user_prop); 743 entry->pl_all = B_TRUE; 744 745 *last = entry; 746 last = &entry->pl_next; 747 } 748 } 749 750 /* add any unsupported features */ 751 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL); 752 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) { 753 char *propname; 754 boolean_t found; 755 zprop_list_t *entry; 756 757 if (zfeature_is_supported(nvpair_name(nvp))) 758 continue; 759 760 propname = zfs_asprintf(hdl, "unsupported@%s", 761 nvpair_name(nvp)); 762 763 /* 764 * Before adding the property to the list make sure that no 765 * other pool already added the same property. 766 */ 767 found = B_FALSE; 768 entry = *plp; 769 while (entry != NULL) { 770 if (entry->pl_user_prop != NULL && 771 strcmp(propname, entry->pl_user_prop) == 0) { 772 found = B_TRUE; 773 break; 774 } 775 entry = entry->pl_next; 776 } 777 if (found) { 778 free(propname); 779 continue; 780 } 781 782 entry = zfs_alloc(hdl, sizeof (zprop_list_t)); 783 entry->pl_prop = ZPROP_INVAL; 784 entry->pl_user_prop = propname; 785 entry->pl_width = strlen(entry->pl_user_prop); 786 entry->pl_all = B_TRUE; 787 788 *last = entry; 789 last = &entry->pl_next; 790 } 791 792 for (entry = *plp; entry != NULL; entry = entry->pl_next) { 793 794 if (entry->pl_fixed) 795 continue; 796 797 if (entry->pl_prop != ZPROP_INVAL && 798 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf), 799 NULL, B_FALSE) == 0) { 800 if (strlen(buf) > entry->pl_width) 801 entry->pl_width = strlen(buf); 802 } 803 } 804 805 return (0); 806} 807 808/* 809 * Get the state for the given feature on the given ZFS pool. 810 */ 811int 812zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf, 813 size_t len) 814{ 815 uint64_t refcount; 816 boolean_t found = B_FALSE; 817 nvlist_t *features = zpool_get_features(zhp); 818 boolean_t supported; 819 const char *feature = strchr(propname, '@') + 1; 820 821 supported = zpool_prop_feature(propname); 822 ASSERT(supported || zpool_prop_unsupported(propname)); 823 824 /* 825 * Convert from feature name to feature guid. This conversion is 826 * unecessary for unsupported@... properties because they already 827 * use guids. 828 */ 829 if (supported) { 830 int ret; 831 spa_feature_t fid; 832 833 ret = zfeature_lookup_name(feature, &fid); 834 if (ret != 0) { 835 (void) strlcpy(buf, "-", len); 836 return (ENOTSUP); 837 } 838 feature = spa_feature_table[fid].fi_guid; 839 } 840 841 if (nvlist_lookup_uint64(features, feature, &refcount) == 0) 842 found = B_TRUE; 843 844 if (supported) { 845 if (!found) { 846 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len); 847 } else { 848 if (refcount == 0) 849 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len); 850 else 851 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len); 852 } 853 } else { 854 if (found) { 855 if (refcount == 0) { 856 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE); 857 } else { 858 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY); 859 } 860 } else { 861 (void) strlcpy(buf, "-", len); 862 return (ENOTSUP); 863 } 864 } 865 866 return (0); 867} 868 869/* 870 * Don't start the slice at the default block of 34; many storage 871 * devices will use a stripe width of 128k, so start there instead. 872 */ 873#define NEW_START_BLOCK 256 874 875/* 876 * Validate the given pool name, optionally putting an extended error message in 877 * 'buf'. 878 */ 879boolean_t 880zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool) 881{ 882 namecheck_err_t why; 883 char what; 884 int ret; 885 886 ret = pool_namecheck(pool, &why, &what); 887 888 /* 889 * The rules for reserved pool names were extended at a later point. 890 * But we need to support users with existing pools that may now be 891 * invalid. So we only check for this expanded set of names during a 892 * create (or import), and only in userland. 893 */ 894 if (ret == 0 && !isopen && 895 (strncmp(pool, "mirror", 6) == 0 || 896 strncmp(pool, "raidz", 5) == 0 || 897 strncmp(pool, "spare", 5) == 0 || 898 strcmp(pool, "log") == 0)) { 899 if (hdl != NULL) 900 zfs_error_aux(hdl, 901 dgettext(TEXT_DOMAIN, "name is reserved")); 902 return (B_FALSE); 903 } 904 905 906 if (ret != 0) { 907 if (hdl != NULL) { 908 switch (why) { 909 case NAME_ERR_TOOLONG: 910 zfs_error_aux(hdl, 911 dgettext(TEXT_DOMAIN, "name is too long")); 912 break; 913 914 case NAME_ERR_INVALCHAR: 915 zfs_error_aux(hdl, 916 dgettext(TEXT_DOMAIN, "invalid character " 917 "'%c' in pool name"), what); 918 break; 919 920 case NAME_ERR_NOLETTER: 921 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 922 "name must begin with a letter")); 923 break; 924 925 case NAME_ERR_RESERVED: 926 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 927 "name is reserved")); 928 break; 929 930 case NAME_ERR_DISKLIKE: 931 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 932 "pool name is reserved")); 933 break; 934 935 case NAME_ERR_LEADING_SLASH: 936 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 937 "leading slash in name")); 938 break; 939 940 case NAME_ERR_EMPTY_COMPONENT: 941 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 942 "empty component in name")); 943 break; 944 945 case NAME_ERR_TRAILING_SLASH: 946 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 947 "trailing slash in name")); 948 break; 949 950 case NAME_ERR_MULTIPLE_AT: 951 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 952 "multiple '@' delimiters in name")); 953 break; 954 955 default: 956 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 957 "(%d) not defined"), why); 958 break; 959 } 960 } 961 return (B_FALSE); 962 } 963 964 return (B_TRUE); 965} 966 967/* 968 * Open a handle to the given pool, even if the pool is currently in the FAULTED 969 * state. 970 */ 971zpool_handle_t * 972zpool_open_canfail(libzfs_handle_t *hdl, const char *pool) 973{ 974 zpool_handle_t *zhp; 975 boolean_t missing; 976 977 /* 978 * Make sure the pool name is valid. 979 */ 980 if (!zpool_name_valid(hdl, B_TRUE, pool)) { 981 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME, 982 dgettext(TEXT_DOMAIN, "cannot open '%s'"), 983 pool); 984 return (NULL); 985 } 986 987 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 988 return (NULL); 989 990 zhp->zpool_hdl = hdl; 991 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 992 993 if (zpool_refresh_stats(zhp, &missing) != 0) { 994 zpool_close(zhp); 995 return (NULL); 996 } 997 998 if (missing) { 999 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool")); 1000 (void) zfs_error_fmt(hdl, EZFS_NOENT, 1001 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool); 1002 zpool_close(zhp); 1003 return (NULL); 1004 } 1005 1006 return (zhp); 1007} 1008 1009/* 1010 * Like the above, but silent on error. Used when iterating over pools (because 1011 * the configuration cache may be out of date). 1012 */ 1013int 1014zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret) 1015{ 1016 zpool_handle_t *zhp; 1017 boolean_t missing; 1018 1019 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL) 1020 return (-1); 1021 1022 zhp->zpool_hdl = hdl; 1023 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name)); 1024 1025 if (zpool_refresh_stats(zhp, &missing) != 0) { 1026 zpool_close(zhp); 1027 return (-1); 1028 } 1029 1030 if (missing) { 1031 zpool_close(zhp); 1032 *ret = NULL; 1033 return (0); 1034 } 1035 1036 *ret = zhp; 1037 return (0); 1038} 1039 1040/* 1041 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted 1042 * state. 1043 */ 1044zpool_handle_t * 1045zpool_open(libzfs_handle_t *hdl, const char *pool) 1046{ 1047 zpool_handle_t *zhp; 1048 1049 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL) 1050 return (NULL); 1051 1052 if (zhp->zpool_state == POOL_STATE_UNAVAIL) { 1053 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL, 1054 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name); 1055 zpool_close(zhp); 1056 return (NULL); 1057 } 1058 1059 return (zhp); 1060} 1061 1062/* 1063 * Close the handle. Simply frees the memory associated with the handle. 1064 */ 1065void 1066zpool_close(zpool_handle_t *zhp) 1067{ 1068 nvlist_free(zhp->zpool_config); 1069 nvlist_free(zhp->zpool_old_config); 1070 nvlist_free(zhp->zpool_props); 1071 free(zhp); 1072} 1073 1074/* 1075 * Return the name of the pool. 1076 */ 1077const char * 1078zpool_get_name(zpool_handle_t *zhp) 1079{ 1080 return (zhp->zpool_name); 1081} 1082 1083 1084/* 1085 * Return the state of the pool (ACTIVE or UNAVAILABLE) 1086 */ 1087int 1088zpool_get_state(zpool_handle_t *zhp) 1089{ 1090 return (zhp->zpool_state); 1091} 1092 1093/* 1094 * Create the named pool, using the provided vdev list. It is assumed 1095 * that the consumer has already validated the contents of the nvlist, so we 1096 * don't have to worry about error semantics. 1097 */ 1098int 1099zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot, 1100 nvlist_t *props, nvlist_t *fsprops) 1101{ 1102 zfs_cmd_t zc = { 0 }; 1103 nvlist_t *zc_fsprops = NULL; 1104 nvlist_t *zc_props = NULL; 1105 char msg[1024]; 1106 int ret = -1; 1107 1108 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1109 "cannot create '%s'"), pool); 1110 1111 if (!zpool_name_valid(hdl, B_FALSE, pool)) 1112 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 1113 1114 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1115 return (-1); 1116 1117 if (props) { 1118 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE }; 1119 1120 if ((zc_props = zpool_valid_proplist(hdl, pool, props, 1121 SPA_VERSION_1, flags, msg)) == NULL) { 1122 goto create_failed; 1123 } 1124 } 1125 1126 if (fsprops) { 1127 uint64_t zoned; 1128 char *zonestr; 1129 1130 zoned = ((nvlist_lookup_string(fsprops, 1131 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) && 1132 strcmp(zonestr, "on") == 0); 1133 1134 if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM, 1135 fsprops, zoned, NULL, NULL, msg)) == NULL) { 1136 goto create_failed; 1137 } 1138 if (!zc_props && 1139 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) { 1140 goto create_failed; 1141 } 1142 if (nvlist_add_nvlist(zc_props, 1143 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) { 1144 goto create_failed; 1145 } 1146 } 1147 1148 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 1149 goto create_failed; 1150 1151 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name)); 1152 1153 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) { 1154 1155 zcmd_free_nvlists(&zc); 1156 nvlist_free(zc_props); 1157 nvlist_free(zc_fsprops); 1158 1159 switch (errno) { 1160 case EBUSY: 1161 /* 1162 * This can happen if the user has specified the same 1163 * device multiple times. We can't reliably detect this 1164 * until we try to add it and see we already have a 1165 * label. 1166 */ 1167 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1168 "one or more vdevs refer to the same device")); 1169 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1170 1171 case ERANGE: 1172 /* 1173 * This happens if the record size is smaller or larger 1174 * than the allowed size range, or not a power of 2. 1175 * 1176 * NOTE: although zfs_valid_proplist is called earlier, 1177 * this case may have slipped through since the 1178 * pool does not exist yet and it is therefore 1179 * impossible to read properties e.g. max blocksize 1180 * from the pool. 1181 */ 1182 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1183 "record size invalid")); 1184 return (zfs_error(hdl, EZFS_BADPROP, msg)); 1185 1186 case EOVERFLOW: 1187 /* 1188 * This occurs when one of the devices is below 1189 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1190 * device was the problem device since there's no 1191 * reliable way to determine device size from userland. 1192 */ 1193 { 1194 char buf[64]; 1195 1196 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1197 1198 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1199 "one or more devices is less than the " 1200 "minimum size (%s)"), buf); 1201 } 1202 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1203 1204 case ENOSPC: 1205 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1206 "one or more devices is out of space")); 1207 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1208 1209 case ENOTBLK: 1210 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1211 "cache device must be a disk or disk slice")); 1212 return (zfs_error(hdl, EZFS_BADDEV, msg)); 1213 1214 default: 1215 return (zpool_standard_error(hdl, errno, msg)); 1216 } 1217 } 1218 1219create_failed: 1220 zcmd_free_nvlists(&zc); 1221 nvlist_free(zc_props); 1222 nvlist_free(zc_fsprops); 1223 return (ret); 1224} 1225 1226/* 1227 * Destroy the given pool. It is up to the caller to ensure that there are no 1228 * datasets left in the pool. 1229 */ 1230int 1231zpool_destroy(zpool_handle_t *zhp, const char *log_str) 1232{ 1233 zfs_cmd_t zc = { 0 }; 1234 zfs_handle_t *zfp = NULL; 1235 libzfs_handle_t *hdl = zhp->zpool_hdl; 1236 char msg[1024]; 1237 1238 if (zhp->zpool_state == POOL_STATE_ACTIVE && 1239 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL) 1240 return (-1); 1241 1242 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1243 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1244 1245 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) { 1246 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1247 "cannot destroy '%s'"), zhp->zpool_name); 1248 1249 if (errno == EROFS) { 1250 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1251 "one or more devices is read only")); 1252 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1253 } else { 1254 (void) zpool_standard_error(hdl, errno, msg); 1255 } 1256 1257 if (zfp) 1258 zfs_close(zfp); 1259 return (-1); 1260 } 1261 1262 if (zfp) { 1263 remove_mountpoint(zfp); 1264 zfs_close(zfp); 1265 } 1266 1267 return (0); 1268} 1269 1270/* 1271 * Add the given vdevs to the pool. The caller must have already performed the 1272 * necessary verification to ensure that the vdev specification is well-formed. 1273 */ 1274int 1275zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot) 1276{ 1277 zfs_cmd_t zc = { 0 }; 1278 int ret; 1279 libzfs_handle_t *hdl = zhp->zpool_hdl; 1280 char msg[1024]; 1281 nvlist_t **spares, **l2cache; 1282 uint_t nspares, nl2cache; 1283 1284 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1285 "cannot add to '%s'"), zhp->zpool_name); 1286 1287 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1288 SPA_VERSION_SPARES && 1289 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 1290 &spares, &nspares) == 0) { 1291 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1292 "upgraded to add hot spares")); 1293 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1294 } 1295 1296 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) < 1297 SPA_VERSION_L2CACHE && 1298 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 1299 &l2cache, &nl2cache) == 0) { 1300 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be " 1301 "upgraded to add cache devices")); 1302 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 1303 } 1304 1305 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 1306 return (-1); 1307 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1308 1309 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) { 1310 switch (errno) { 1311 case EBUSY: 1312 /* 1313 * This can happen if the user has specified the same 1314 * device multiple times. We can't reliably detect this 1315 * until we try to add it and see we already have a 1316 * label. 1317 */ 1318 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1319 "one or more vdevs refer to the same device")); 1320 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1321 break; 1322 1323 case EOVERFLOW: 1324 /* 1325 * This occurrs when one of the devices is below 1326 * SPA_MINDEVSIZE. Unfortunately, we can't detect which 1327 * device was the problem device since there's no 1328 * reliable way to determine device size from userland. 1329 */ 1330 { 1331 char buf[64]; 1332 1333 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf)); 1334 1335 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1336 "device is less than the minimum " 1337 "size (%s)"), buf); 1338 } 1339 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1340 break; 1341 1342 case ENOTSUP: 1343 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1344 "pool must be upgraded to add these vdevs")); 1345 (void) zfs_error(hdl, EZFS_BADVERSION, msg); 1346 break; 1347 1348 case EDOM: 1349 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1350 "root pool can not have multiple vdevs" 1351 " or separate logs")); 1352 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg); 1353 break; 1354 1355 case ENOTBLK: 1356 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1357 "cache device must be a disk or disk slice")); 1358 (void) zfs_error(hdl, EZFS_BADDEV, msg); 1359 break; 1360 1361 default: 1362 (void) zpool_standard_error(hdl, errno, msg); 1363 } 1364 1365 ret = -1; 1366 } else { 1367 ret = 0; 1368 } 1369 1370 zcmd_free_nvlists(&zc); 1371 1372 return (ret); 1373} 1374 1375/* 1376 * Exports the pool from the system. The caller must ensure that there are no 1377 * mounted datasets in the pool. 1378 */ 1379static int 1380zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce, 1381 const char *log_str) 1382{ 1383 zfs_cmd_t zc = { 0 }; 1384 char msg[1024]; 1385 1386 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 1387 "cannot export '%s'"), zhp->zpool_name); 1388 1389 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1390 zc.zc_cookie = force; 1391 zc.zc_guid = hardforce; 1392 zc.zc_history = (uint64_t)(uintptr_t)log_str; 1393 1394 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) { 1395 switch (errno) { 1396 case EXDEV: 1397 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN, 1398 "use '-f' to override the following errors:\n" 1399 "'%s' has an active shared spare which could be" 1400 " used by other pools once '%s' is exported."), 1401 zhp->zpool_name, zhp->zpool_name); 1402 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE, 1403 msg)); 1404 default: 1405 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno, 1406 msg)); 1407 } 1408 } 1409 1410 return (0); 1411} 1412 1413int 1414zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str) 1415{ 1416 return (zpool_export_common(zhp, force, B_FALSE, log_str)); 1417} 1418 1419int 1420zpool_export_force(zpool_handle_t *zhp, const char *log_str) 1421{ 1422 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str)); 1423} 1424 1425static void 1426zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun, 1427 nvlist_t *config) 1428{ 1429 nvlist_t *nv = NULL; 1430 uint64_t rewindto; 1431 int64_t loss = -1; 1432 struct tm t; 1433 char timestr[128]; 1434 1435 if (!hdl->libzfs_printerr || config == NULL) 1436 return; 1437 1438 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1439 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) { 1440 return; 1441 } 1442 1443 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1444 return; 1445 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1446 1447 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1448 strftime(timestr, 128, 0, &t) != 0) { 1449 if (dryrun) { 1450 (void) printf(dgettext(TEXT_DOMAIN, 1451 "Would be able to return %s " 1452 "to its state as of %s.\n"), 1453 name, timestr); 1454 } else { 1455 (void) printf(dgettext(TEXT_DOMAIN, 1456 "Pool %s returned to its state as of %s.\n"), 1457 name, timestr); 1458 } 1459 if (loss > 120) { 1460 (void) printf(dgettext(TEXT_DOMAIN, 1461 "%s approximately %lld "), 1462 dryrun ? "Would discard" : "Discarded", 1463 (loss + 30) / 60); 1464 (void) printf(dgettext(TEXT_DOMAIN, 1465 "minutes of transactions.\n")); 1466 } else if (loss > 0) { 1467 (void) printf(dgettext(TEXT_DOMAIN, 1468 "%s approximately %lld "), 1469 dryrun ? "Would discard" : "Discarded", loss); 1470 (void) printf(dgettext(TEXT_DOMAIN, 1471 "seconds of transactions.\n")); 1472 } 1473 } 1474} 1475 1476void 1477zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason, 1478 nvlist_t *config) 1479{ 1480 nvlist_t *nv = NULL; 1481 int64_t loss = -1; 1482 uint64_t edata = UINT64_MAX; 1483 uint64_t rewindto; 1484 struct tm t; 1485 char timestr[128]; 1486 1487 if (!hdl->libzfs_printerr) 1488 return; 1489 1490 if (reason >= 0) 1491 (void) printf(dgettext(TEXT_DOMAIN, "action: ")); 1492 else 1493 (void) printf(dgettext(TEXT_DOMAIN, "\t")); 1494 1495 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */ 1496 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 || 1497 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 || 1498 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0) 1499 goto no_info; 1500 1501 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss); 1502 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS, 1503 &edata); 1504 1505 (void) printf(dgettext(TEXT_DOMAIN, 1506 "Recovery is possible, but will result in some data loss.\n")); 1507 1508 if (localtime_r((time_t *)&rewindto, &t) != NULL && 1509 strftime(timestr, 128, 0, &t) != 0) { 1510 (void) printf(dgettext(TEXT_DOMAIN, 1511 "\tReturning the pool to its state as of %s\n" 1512 "\tshould correct the problem. "), 1513 timestr); 1514 } else { 1515 (void) printf(dgettext(TEXT_DOMAIN, 1516 "\tReverting the pool to an earlier state " 1517 "should correct the problem.\n\t")); 1518 } 1519 1520 if (loss > 120) { 1521 (void) printf(dgettext(TEXT_DOMAIN, 1522 "Approximately %lld minutes of data\n" 1523 "\tmust be discarded, irreversibly. "), (loss + 30) / 60); 1524 } else if (loss > 0) { 1525 (void) printf(dgettext(TEXT_DOMAIN, 1526 "Approximately %lld seconds of data\n" 1527 "\tmust be discarded, irreversibly. "), loss); 1528 } 1529 if (edata != 0 && edata != UINT64_MAX) { 1530 if (edata == 1) { 1531 (void) printf(dgettext(TEXT_DOMAIN, 1532 "After rewind, at least\n" 1533 "\tone persistent user-data error will remain. ")); 1534 } else { 1535 (void) printf(dgettext(TEXT_DOMAIN, 1536 "After rewind, several\n" 1537 "\tpersistent user-data errors will remain. ")); 1538 } 1539 } 1540 (void) printf(dgettext(TEXT_DOMAIN, 1541 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "), 1542 reason >= 0 ? "clear" : "import", name); 1543 1544 (void) printf(dgettext(TEXT_DOMAIN, 1545 "A scrub of the pool\n" 1546 "\tis strongly recommended after recovery.\n")); 1547 return; 1548 1549no_info: 1550 (void) printf(dgettext(TEXT_DOMAIN, 1551 "Destroy and re-create the pool from\n\ta backup source.\n")); 1552} 1553 1554/* 1555 * zpool_import() is a contracted interface. Should be kept the same 1556 * if possible. 1557 * 1558 * Applications should use zpool_import_props() to import a pool with 1559 * new properties value to be set. 1560 */ 1561int 1562zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1563 char *altroot) 1564{ 1565 nvlist_t *props = NULL; 1566 int ret; 1567 1568 if (altroot != NULL) { 1569 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) { 1570 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1571 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1572 newname)); 1573 } 1574 1575 if (nvlist_add_string(props, 1576 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 || 1577 nvlist_add_string(props, 1578 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) { 1579 nvlist_free(props); 1580 return (zfs_error_fmt(hdl, EZFS_NOMEM, 1581 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1582 newname)); 1583 } 1584 } 1585 1586 ret = zpool_import_props(hdl, config, newname, props, 1587 ZFS_IMPORT_NORMAL); 1588 nvlist_free(props); 1589 return (ret); 1590} 1591 1592static void 1593print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, 1594 int indent) 1595{ 1596 nvlist_t **child; 1597 uint_t c, children; 1598 char *vname; 1599 uint64_t is_log = 0; 1600 1601 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, 1602 &is_log); 1603 1604 if (name != NULL) 1605 (void) printf("\t%*s%s%s\n", indent, "", name, 1606 is_log ? " [log]" : ""); 1607 1608 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 1609 &child, &children) != 0) 1610 return; 1611 1612 for (c = 0; c < children; c++) { 1613 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); 1614 print_vdev_tree(hdl, vname, child[c], indent + 2); 1615 free(vname); 1616 } 1617} 1618 1619void 1620zpool_print_unsup_feat(nvlist_t *config) 1621{ 1622 nvlist_t *nvinfo, *unsup_feat; 1623 1624 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 1625 0); 1626 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT, 1627 &unsup_feat) == 0); 1628 1629 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL; 1630 nvp = nvlist_next_nvpair(unsup_feat, nvp)) { 1631 char *desc; 1632 1633 verify(nvpair_type(nvp) == DATA_TYPE_STRING); 1634 verify(nvpair_value_string(nvp, &desc) == 0); 1635 1636 if (strlen(desc) > 0) 1637 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc); 1638 else 1639 (void) printf("\t%s\n", nvpair_name(nvp)); 1640 } 1641} 1642 1643/* 1644 * Import the given pool using the known configuration and a list of 1645 * properties to be set. The configuration should have come from 1646 * zpool_find_import(). The 'newname' parameters control whether the pool 1647 * is imported with a different name. 1648 */ 1649int 1650zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname, 1651 nvlist_t *props, int flags) 1652{ 1653 zfs_cmd_t zc = { 0 }; 1654 zpool_rewind_policy_t policy; 1655 nvlist_t *nv = NULL; 1656 nvlist_t *nvinfo = NULL; 1657 nvlist_t *missing = NULL; 1658 char *thename; 1659 char *origname; 1660 int ret; 1661 int error = 0; 1662 char errbuf[1024]; 1663 1664 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 1665 &origname) == 0); 1666 1667 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 1668 "cannot import pool '%s'"), origname); 1669 1670 if (newname != NULL) { 1671 if (!zpool_name_valid(hdl, B_FALSE, newname)) 1672 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME, 1673 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1674 newname)); 1675 thename = (char *)newname; 1676 } else { 1677 thename = origname; 1678 } 1679 1680 if (props != NULL) { 1681 uint64_t version; 1682 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 1683 1684 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 1685 &version) == 0); 1686 1687 if ((props = zpool_valid_proplist(hdl, origname, 1688 props, version, flags, errbuf)) == NULL) 1689 return (-1); 1690 if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) { 1691 nvlist_free(props); 1692 return (-1); 1693 } 1694 nvlist_free(props); 1695 } 1696 1697 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name)); 1698 1699 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 1700 &zc.zc_guid) == 0); 1701 1702 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) { 1703 zcmd_free_nvlists(&zc); 1704 return (-1); 1705 } 1706 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) { 1707 zcmd_free_nvlists(&zc); 1708 return (-1); 1709 } 1710 1711 zc.zc_cookie = flags; 1712 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 && 1713 errno == ENOMEM) { 1714 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 1715 zcmd_free_nvlists(&zc); 1716 return (-1); 1717 } 1718 } 1719 if (ret != 0) 1720 error = errno; 1721 1722 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv); 1723 1724 zcmd_free_nvlists(&zc); 1725 1726 zpool_get_rewind_policy(config, &policy); 1727 1728 if (error) { 1729 char desc[1024]; 1730 1731 /* 1732 * Dry-run failed, but we print out what success 1733 * looks like if we found a best txg 1734 */ 1735 if (policy.zrp_request & ZPOOL_TRY_REWIND) { 1736 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1737 B_TRUE, nv); 1738 nvlist_free(nv); 1739 return (-1); 1740 } 1741 1742 if (newname == NULL) 1743 (void) snprintf(desc, sizeof (desc), 1744 dgettext(TEXT_DOMAIN, "cannot import '%s'"), 1745 thename); 1746 else 1747 (void) snprintf(desc, sizeof (desc), 1748 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"), 1749 origname, thename); 1750 1751 switch (error) { 1752 case ENOTSUP: 1753 if (nv != NULL && nvlist_lookup_nvlist(nv, 1754 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1755 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) { 1756 (void) printf(dgettext(TEXT_DOMAIN, "This " 1757 "pool uses the following feature(s) not " 1758 "supported by this system:\n")); 1759 zpool_print_unsup_feat(nv); 1760 if (nvlist_exists(nvinfo, 1761 ZPOOL_CONFIG_CAN_RDONLY)) { 1762 (void) printf(dgettext(TEXT_DOMAIN, 1763 "All unsupported features are only " 1764 "required for writing to the pool." 1765 "\nThe pool can be imported using " 1766 "'-o readonly=on'.\n")); 1767 } 1768 } 1769 /* 1770 * Unsupported version. 1771 */ 1772 (void) zfs_error(hdl, EZFS_BADVERSION, desc); 1773 break; 1774 1775 case EINVAL: 1776 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc); 1777 break; 1778 1779 case EROFS: 1780 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1781 "one or more devices is read only")); 1782 (void) zfs_error(hdl, EZFS_BADDEV, desc); 1783 break; 1784 1785 case ENXIO: 1786 if (nv && nvlist_lookup_nvlist(nv, 1787 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 && 1788 nvlist_lookup_nvlist(nvinfo, 1789 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) { 1790 (void) printf(dgettext(TEXT_DOMAIN, 1791 "The devices below are missing, use " 1792 "'-m' to import the pool anyway:\n")); 1793 print_vdev_tree(hdl, NULL, missing, 2); 1794 (void) printf("\n"); 1795 } 1796 (void) zpool_standard_error(hdl, error, desc); 1797 break; 1798 1799 case EEXIST: 1800 (void) zpool_standard_error(hdl, error, desc); 1801 break; 1802 case ENAMETOOLONG: 1803 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 1804 "new name of at least one dataset is longer than " 1805 "the maximum allowable length")); 1806 (void) zfs_error(hdl, EZFS_NAMETOOLONG, desc); 1807 break; 1808 default: 1809 (void) zpool_standard_error(hdl, error, desc); 1810 zpool_explain_recover(hdl, 1811 newname ? origname : thename, -error, nv); 1812 break; 1813 } 1814 1815 nvlist_free(nv); 1816 ret = -1; 1817 } else { 1818 zpool_handle_t *zhp; 1819 1820 /* 1821 * This should never fail, but play it safe anyway. 1822 */ 1823 if (zpool_open_silent(hdl, thename, &zhp) != 0) 1824 ret = -1; 1825 else if (zhp != NULL) 1826 zpool_close(zhp); 1827 if (policy.zrp_request & 1828 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 1829 zpool_rewind_exclaim(hdl, newname ? origname : thename, 1830 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv); 1831 } 1832 nvlist_free(nv); 1833 return (0); 1834 } 1835 1836 return (ret); 1837} 1838 1839/* 1840 * Scan the pool. 1841 */ 1842int 1843zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func) 1844{ 1845 zfs_cmd_t zc = { 0 }; 1846 char msg[1024]; 1847 libzfs_handle_t *hdl = zhp->zpool_hdl; 1848 1849 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 1850 zc.zc_cookie = func; 1851 1852 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 || 1853 (errno == ENOENT && func != POOL_SCAN_NONE)) 1854 return (0); 1855 1856 if (func == POOL_SCAN_SCRUB) { 1857 (void) snprintf(msg, sizeof (msg), 1858 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name); 1859 } else if (func == POOL_SCAN_NONE) { 1860 (void) snprintf(msg, sizeof (msg), 1861 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"), 1862 zc.zc_name); 1863 } else { 1864 assert(!"unexpected result"); 1865 } 1866 1867 if (errno == EBUSY) { 1868 nvlist_t *nvroot; 1869 pool_scan_stat_t *ps = NULL; 1870 uint_t psc; 1871 1872 verify(nvlist_lookup_nvlist(zhp->zpool_config, 1873 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 1874 (void) nvlist_lookup_uint64_array(nvroot, 1875 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc); 1876 if (ps && ps->pss_func == POOL_SCAN_SCRUB) 1877 return (zfs_error(hdl, EZFS_SCRUBBING, msg)); 1878 else 1879 return (zfs_error(hdl, EZFS_RESILVERING, msg)); 1880 } else if (errno == ENOENT) { 1881 return (zfs_error(hdl, EZFS_NO_SCRUB, msg)); 1882 } else { 1883 return (zpool_standard_error(hdl, errno, msg)); 1884 } 1885} 1886 1887#ifdef illumos 1888/* 1889 * This provides a very minimal check whether a given string is likely a 1890 * c#t#d# style string. Users of this are expected to do their own 1891 * verification of the s# part. 1892 */ 1893#define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1])) 1894 1895/* 1896 * More elaborate version for ones which may start with "/dev/dsk/" 1897 * and the like. 1898 */ 1899static int 1900ctd_check_path(char *str) 1901{ 1902 /* 1903 * If it starts with a slash, check the last component. 1904 */ 1905 if (str && str[0] == '/') { 1906 char *tmp = strrchr(str, '/'); 1907 1908 /* 1909 * If it ends in "/old", check the second-to-last 1910 * component of the string instead. 1911 */ 1912 if (tmp != str && strcmp(tmp, "/old") == 0) { 1913 for (tmp--; *tmp != '/'; tmp--) 1914 ; 1915 } 1916 str = tmp + 1; 1917 } 1918 return (CTD_CHECK(str)); 1919} 1920#endif 1921 1922/* 1923 * Find a vdev that matches the search criteria specified. We use the 1924 * the nvpair name to determine how we should look for the device. 1925 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL 1926 * spare; but FALSE if its an INUSE spare. 1927 */ 1928static nvlist_t * 1929vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare, 1930 boolean_t *l2cache, boolean_t *log) 1931{ 1932 uint_t c, children; 1933 nvlist_t **child; 1934 nvlist_t *ret; 1935 uint64_t is_log; 1936 char *srchkey; 1937 nvpair_t *pair = nvlist_next_nvpair(search, NULL); 1938 1939 /* Nothing to look for */ 1940 if (search == NULL || pair == NULL) 1941 return (NULL); 1942 1943 /* Obtain the key we will use to search */ 1944 srchkey = nvpair_name(pair); 1945 1946 switch (nvpair_type(pair)) { 1947 case DATA_TYPE_UINT64: 1948 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) { 1949 uint64_t srchval, theguid; 1950 1951 verify(nvpair_value_uint64(pair, &srchval) == 0); 1952 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 1953 &theguid) == 0); 1954 if (theguid == srchval) 1955 return (nv); 1956 } 1957 break; 1958 1959 case DATA_TYPE_STRING: { 1960 char *srchval, *val; 1961 1962 verify(nvpair_value_string(pair, &srchval) == 0); 1963 if (nvlist_lookup_string(nv, srchkey, &val) != 0) 1964 break; 1965 1966 /* 1967 * Search for the requested value. Special cases: 1968 * 1969 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in 1970 * "s0" or "s0/old". The "s0" part is hidden from the user, 1971 * but included in the string, so this matches around it. 1972 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE). 1973 * 1974 * Otherwise, all other searches are simple string compares. 1975 */ 1976#ifdef illumos 1977 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 && 1978 ctd_check_path(val)) { 1979 uint64_t wholedisk = 0; 1980 1981 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 1982 &wholedisk); 1983 if (wholedisk) { 1984 int slen = strlen(srchval); 1985 int vlen = strlen(val); 1986 1987 if (slen != vlen - 2) 1988 break; 1989 1990 /* 1991 * make_leaf_vdev() should only set 1992 * wholedisk for ZPOOL_CONFIG_PATHs which 1993 * will include "/dev/dsk/", giving plenty of 1994 * room for the indices used next. 1995 */ 1996 ASSERT(vlen >= 6); 1997 1998 /* 1999 * strings identical except trailing "s0" 2000 */ 2001 if (strcmp(&val[vlen - 2], "s0") == 0 && 2002 strncmp(srchval, val, slen) == 0) 2003 return (nv); 2004 2005 /* 2006 * strings identical except trailing "s0/old" 2007 */ 2008 if (strcmp(&val[vlen - 6], "s0/old") == 0 && 2009 strcmp(&srchval[slen - 4], "/old") == 0 && 2010 strncmp(srchval, val, slen - 4) == 0) 2011 return (nv); 2012 2013 break; 2014 } 2015 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2016#else 2017 if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) { 2018#endif 2019 char *type, *idx, *end, *p; 2020 uint64_t id, vdev_id; 2021 2022 /* 2023 * Determine our vdev type, keeping in mind 2024 * that the srchval is composed of a type and 2025 * vdev id pair (i.e. mirror-4). 2026 */ 2027 if ((type = strdup(srchval)) == NULL) 2028 return (NULL); 2029 2030 if ((p = strrchr(type, '-')) == NULL) { 2031 free(type); 2032 break; 2033 } 2034 idx = p + 1; 2035 *p = '\0'; 2036 2037 /* 2038 * If the types don't match then keep looking. 2039 */ 2040 if (strncmp(val, type, strlen(val)) != 0) { 2041 free(type); 2042 break; 2043 } 2044 2045 verify(strncmp(type, VDEV_TYPE_RAIDZ, 2046 strlen(VDEV_TYPE_RAIDZ)) == 0 || 2047 strncmp(type, VDEV_TYPE_MIRROR, 2048 strlen(VDEV_TYPE_MIRROR)) == 0); 2049 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 2050 &id) == 0); 2051 2052 errno = 0; 2053 vdev_id = strtoull(idx, &end, 10); 2054 2055 free(type); 2056 if (errno != 0) 2057 return (NULL); 2058 2059 /* 2060 * Now verify that we have the correct vdev id. 2061 */ 2062 if (vdev_id == id) 2063 return (nv); 2064 } 2065 2066 /* 2067 * Common case 2068 */ 2069 if (strcmp(srchval, val) == 0) 2070 return (nv); 2071 break; 2072 } 2073 2074 default: 2075 break; 2076 } 2077 2078 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, 2079 &child, &children) != 0) 2080 return (NULL); 2081 2082 for (c = 0; c < children; c++) { 2083 if ((ret = vdev_to_nvlist_iter(child[c], search, 2084 avail_spare, l2cache, NULL)) != NULL) { 2085 /* 2086 * The 'is_log' value is only set for the toplevel 2087 * vdev, not the leaf vdevs. So we always lookup the 2088 * log device from the root of the vdev tree (where 2089 * 'log' is non-NULL). 2090 */ 2091 if (log != NULL && 2092 nvlist_lookup_uint64(child[c], 2093 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 && 2094 is_log) { 2095 *log = B_TRUE; 2096 } 2097 return (ret); 2098 } 2099 } 2100 2101 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, 2102 &child, &children) == 0) { 2103 for (c = 0; c < children; c++) { 2104 if ((ret = vdev_to_nvlist_iter(child[c], search, 2105 avail_spare, l2cache, NULL)) != NULL) { 2106 *avail_spare = B_TRUE; 2107 return (ret); 2108 } 2109 } 2110 } 2111 2112 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, 2113 &child, &children) == 0) { 2114 for (c = 0; c < children; c++) { 2115 if ((ret = vdev_to_nvlist_iter(child[c], search, 2116 avail_spare, l2cache, NULL)) != NULL) { 2117 *l2cache = B_TRUE; 2118 return (ret); 2119 } 2120 } 2121 } 2122 2123 return (NULL); 2124} 2125 2126/* 2127 * Given a physical path (minus the "/devices" prefix), find the 2128 * associated vdev. 2129 */ 2130nvlist_t * 2131zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath, 2132 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log) 2133{ 2134 nvlist_t *search, *nvroot, *ret; 2135 2136 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2137 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0); 2138 2139 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2140 &nvroot) == 0); 2141 2142 *avail_spare = B_FALSE; 2143 *l2cache = B_FALSE; 2144 if (log != NULL) 2145 *log = B_FALSE; 2146 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2147 nvlist_free(search); 2148 2149 return (ret); 2150} 2151 2152/* 2153 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz). 2154 */ 2155boolean_t 2156zpool_vdev_is_interior(const char *name) 2157{ 2158 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 || 2159 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) 2160 return (B_TRUE); 2161 return (B_FALSE); 2162} 2163 2164nvlist_t * 2165zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare, 2166 boolean_t *l2cache, boolean_t *log) 2167{ 2168 char buf[MAXPATHLEN]; 2169 char *end; 2170 nvlist_t *nvroot, *search, *ret; 2171 uint64_t guid; 2172 2173 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0); 2174 2175 guid = strtoull(path, &end, 10); 2176 if (guid != 0 && *end == '\0') { 2177 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0); 2178 } else if (zpool_vdev_is_interior(path)) { 2179 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0); 2180 } else if (path[0] != '/') { 2181 (void) snprintf(buf, sizeof (buf), "%s%s", _PATH_DEV, path); 2182 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0); 2183 } else { 2184 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0); 2185 } 2186 2187 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE, 2188 &nvroot) == 0); 2189 2190 *avail_spare = B_FALSE; 2191 *l2cache = B_FALSE; 2192 if (log != NULL) 2193 *log = B_FALSE; 2194 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log); 2195 nvlist_free(search); 2196 2197 return (ret); 2198} 2199 2200static int 2201vdev_online(nvlist_t *nv) 2202{ 2203 uint64_t ival; 2204 2205 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 || 2206 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 || 2207 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0) 2208 return (0); 2209 2210 return (1); 2211} 2212 2213/* 2214 * Helper function for zpool_get_physpaths(). 2215 */ 2216static int 2217vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size, 2218 size_t *bytes_written) 2219{ 2220 size_t bytes_left, pos, rsz; 2221 char *tmppath; 2222 const char *format; 2223 2224 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH, 2225 &tmppath) != 0) 2226 return (EZFS_NODEVICE); 2227 2228 pos = *bytes_written; 2229 bytes_left = physpath_size - pos; 2230 format = (pos == 0) ? "%s" : " %s"; 2231 2232 rsz = snprintf(physpath + pos, bytes_left, format, tmppath); 2233 *bytes_written += rsz; 2234 2235 if (rsz >= bytes_left) { 2236 /* if physpath was not copied properly, clear it */ 2237 if (bytes_left != 0) { 2238 physpath[pos] = 0; 2239 } 2240 return (EZFS_NOSPC); 2241 } 2242 return (0); 2243} 2244 2245static int 2246vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size, 2247 size_t *rsz, boolean_t is_spare) 2248{ 2249 char *type; 2250 int ret; 2251 2252 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) 2253 return (EZFS_INVALCONFIG); 2254 2255 if (strcmp(type, VDEV_TYPE_DISK) == 0) { 2256 /* 2257 * An active spare device has ZPOOL_CONFIG_IS_SPARE set. 2258 * For a spare vdev, we only want to boot from the active 2259 * spare device. 2260 */ 2261 if (is_spare) { 2262 uint64_t spare = 0; 2263 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 2264 &spare); 2265 if (!spare) 2266 return (EZFS_INVALCONFIG); 2267 } 2268 2269 if (vdev_online(nv)) { 2270 if ((ret = vdev_get_one_physpath(nv, physpath, 2271 phypath_size, rsz)) != 0) 2272 return (ret); 2273 } 2274 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 || 2275 strcmp(type, VDEV_TYPE_RAIDZ) == 0 || 2276 strcmp(type, VDEV_TYPE_REPLACING) == 0 || 2277 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) { 2278 nvlist_t **child; 2279 uint_t count; 2280 int i, ret; 2281 2282 if (nvlist_lookup_nvlist_array(nv, 2283 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0) 2284 return (EZFS_INVALCONFIG); 2285 2286 for (i = 0; i < count; i++) { 2287 ret = vdev_get_physpaths(child[i], physpath, 2288 phypath_size, rsz, is_spare); 2289 if (ret == EZFS_NOSPC) 2290 return (ret); 2291 } 2292 } 2293 2294 return (EZFS_POOL_INVALARG); 2295} 2296 2297/* 2298 * Get phys_path for a root pool config. 2299 * Return 0 on success; non-zero on failure. 2300 */ 2301static int 2302zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size) 2303{ 2304 size_t rsz; 2305 nvlist_t *vdev_root; 2306 nvlist_t **child; 2307 uint_t count; 2308 char *type; 2309 2310 rsz = 0; 2311 2312 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 2313 &vdev_root) != 0) 2314 return (EZFS_INVALCONFIG); 2315 2316 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 || 2317 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN, 2318 &child, &count) != 0) 2319 return (EZFS_INVALCONFIG); 2320 2321 /* 2322 * root pool can only have a single top-level vdev. 2323 */ 2324 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1) 2325 return (EZFS_POOL_INVALARG); 2326 2327 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz, 2328 B_FALSE); 2329 2330 /* No online devices */ 2331 if (rsz == 0) 2332 return (EZFS_NODEVICE); 2333 2334 return (0); 2335} 2336 2337/* 2338 * Get phys_path for a root pool 2339 * Return 0 on success; non-zero on failure. 2340 */ 2341int 2342zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size) 2343{ 2344 return (zpool_get_config_physpath(zhp->zpool_config, physpath, 2345 phypath_size)); 2346} 2347 2348/* 2349 * If the device has being dynamically expanded then we need to relabel 2350 * the disk to use the new unallocated space. 2351 */ 2352static int 2353zpool_relabel_disk(libzfs_handle_t *hdl, const char *name) 2354{ 2355#ifdef illumos 2356 char path[MAXPATHLEN]; 2357 char errbuf[1024]; 2358 int fd, error; 2359 int (*_efi_use_whole_disk)(int); 2360 2361 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT, 2362 "efi_use_whole_disk")) == NULL) 2363 return (-1); 2364 2365 (void) snprintf(path, sizeof (path), "%s/%s", ZFS_RDISK_ROOT, name); 2366 2367 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 2368 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2369 "relabel '%s': unable to open device"), name); 2370 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 2371 } 2372 2373 /* 2374 * It's possible that we might encounter an error if the device 2375 * does not have any unallocated space left. If so, we simply 2376 * ignore that error and continue on. 2377 */ 2378 error = _efi_use_whole_disk(fd); 2379 (void) close(fd); 2380 if (error && error != VT_ENOSPC) { 2381 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot " 2382 "relabel '%s': unable to read disk capacity"), name); 2383 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 2384 } 2385#endif /* illumos */ 2386 return (0); 2387} 2388 2389/* 2390 * Bring the specified vdev online. The 'flags' parameter is a set of the 2391 * ZFS_ONLINE_* flags. 2392 */ 2393int 2394zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags, 2395 vdev_state_t *newstate) 2396{ 2397 zfs_cmd_t zc = { 0 }; 2398 char msg[1024]; 2399 nvlist_t *tgt; 2400 boolean_t avail_spare, l2cache, islog; 2401 libzfs_handle_t *hdl = zhp->zpool_hdl; 2402 2403 if (flags & ZFS_ONLINE_EXPAND) { 2404 (void) snprintf(msg, sizeof (msg), 2405 dgettext(TEXT_DOMAIN, "cannot expand %s"), path); 2406 } else { 2407 (void) snprintf(msg, sizeof (msg), 2408 dgettext(TEXT_DOMAIN, "cannot online %s"), path); 2409 } 2410 2411 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2412 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2413 &islog)) == NULL) 2414 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2415 2416 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2417 2418 if (avail_spare) 2419 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2420 2421 if (flags & ZFS_ONLINE_EXPAND || 2422 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) { 2423 char *pathname = NULL; 2424 uint64_t wholedisk = 0; 2425 2426 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK, 2427 &wholedisk); 2428 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, 2429 &pathname) == 0); 2430 2431 /* 2432 * XXX - L2ARC 1.0 devices can't support expansion. 2433 */ 2434 if (l2cache) { 2435 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2436 "cannot expand cache devices")); 2437 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg)); 2438 } 2439 2440 if (wholedisk) { 2441 pathname += strlen(ZFS_DISK_ROOT) + 1; 2442 (void) zpool_relabel_disk(hdl, pathname); 2443 } 2444 } 2445 2446 zc.zc_cookie = VDEV_STATE_ONLINE; 2447 zc.zc_obj = flags; 2448 2449 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) { 2450 if (errno == EINVAL) { 2451 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split " 2452 "from this pool into a new one. Use '%s' " 2453 "instead"), "zpool detach"); 2454 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg)); 2455 } 2456 return (zpool_standard_error(hdl, errno, msg)); 2457 } 2458 2459 *newstate = zc.zc_cookie; 2460 return (0); 2461} 2462 2463/* 2464 * Take the specified vdev offline 2465 */ 2466int 2467zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp) 2468{ 2469 zfs_cmd_t zc = { 0 }; 2470 char msg[1024]; 2471 nvlist_t *tgt; 2472 boolean_t avail_spare, l2cache; 2473 libzfs_handle_t *hdl = zhp->zpool_hdl; 2474 2475 (void) snprintf(msg, sizeof (msg), 2476 dgettext(TEXT_DOMAIN, "cannot offline %s"), path); 2477 2478 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2479 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2480 NULL)) == NULL) 2481 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2482 2483 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2484 2485 if (avail_spare) 2486 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2487 2488 zc.zc_cookie = VDEV_STATE_OFFLINE; 2489 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0; 2490 2491 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2492 return (0); 2493 2494 switch (errno) { 2495 case EBUSY: 2496 2497 /* 2498 * There are no other replicas of this device. 2499 */ 2500 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2501 2502 case EEXIST: 2503 /* 2504 * The log device has unplayed logs 2505 */ 2506 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg)); 2507 2508 default: 2509 return (zpool_standard_error(hdl, errno, msg)); 2510 } 2511} 2512 2513/* 2514 * Mark the given vdev faulted. 2515 */ 2516int 2517zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2518{ 2519 zfs_cmd_t zc = { 0 }; 2520 char msg[1024]; 2521 libzfs_handle_t *hdl = zhp->zpool_hdl; 2522 2523 (void) snprintf(msg, sizeof (msg), 2524 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid); 2525 2526 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2527 zc.zc_guid = guid; 2528 zc.zc_cookie = VDEV_STATE_FAULTED; 2529 zc.zc_obj = aux; 2530 2531 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2532 return (0); 2533 2534 switch (errno) { 2535 case EBUSY: 2536 2537 /* 2538 * There are no other replicas of this device. 2539 */ 2540 return (zfs_error(hdl, EZFS_NOREPLICAS, msg)); 2541 2542 default: 2543 return (zpool_standard_error(hdl, errno, msg)); 2544 } 2545 2546} 2547 2548/* 2549 * Mark the given vdev degraded. 2550 */ 2551int 2552zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux) 2553{ 2554 zfs_cmd_t zc = { 0 }; 2555 char msg[1024]; 2556 libzfs_handle_t *hdl = zhp->zpool_hdl; 2557 2558 (void) snprintf(msg, sizeof (msg), 2559 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid); 2560 2561 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2562 zc.zc_guid = guid; 2563 zc.zc_cookie = VDEV_STATE_DEGRADED; 2564 zc.zc_obj = aux; 2565 2566 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0) 2567 return (0); 2568 2569 return (zpool_standard_error(hdl, errno, msg)); 2570} 2571 2572/* 2573 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as 2574 * a hot spare. 2575 */ 2576static boolean_t 2577is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which) 2578{ 2579 nvlist_t **child; 2580 uint_t c, children; 2581 char *type; 2582 2583 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child, 2584 &children) == 0) { 2585 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE, 2586 &type) == 0); 2587 2588 if (strcmp(type, VDEV_TYPE_SPARE) == 0 && 2589 children == 2 && child[which] == tgt) 2590 return (B_TRUE); 2591 2592 for (c = 0; c < children; c++) 2593 if (is_replacing_spare(child[c], tgt, which)) 2594 return (B_TRUE); 2595 } 2596 2597 return (B_FALSE); 2598} 2599 2600/* 2601 * Attach new_disk (fully described by nvroot) to old_disk. 2602 * If 'replacing' is specified, the new disk will replace the old one. 2603 */ 2604int 2605zpool_vdev_attach(zpool_handle_t *zhp, 2606 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing) 2607{ 2608 zfs_cmd_t zc = { 0 }; 2609 char msg[1024]; 2610 int ret; 2611 nvlist_t *tgt; 2612 boolean_t avail_spare, l2cache, islog; 2613 uint64_t val; 2614 char *newname; 2615 nvlist_t **child; 2616 uint_t children; 2617 nvlist_t *config_root; 2618 libzfs_handle_t *hdl = zhp->zpool_hdl; 2619 boolean_t rootpool = zpool_is_bootable(zhp); 2620 2621 if (replacing) 2622 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2623 "cannot replace %s with %s"), old_disk, new_disk); 2624 else 2625 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN, 2626 "cannot attach %s to %s"), new_disk, old_disk); 2627 2628 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2629 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache, 2630 &islog)) == 0) 2631 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2632 2633 if (avail_spare) 2634 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2635 2636 if (l2cache) 2637 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2638 2639 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2640 zc.zc_cookie = replacing; 2641 2642 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 2643 &child, &children) != 0 || children != 1) { 2644 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2645 "new device must be a single disk")); 2646 return (zfs_error(hdl, EZFS_INVALCONFIG, msg)); 2647 } 2648 2649 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), 2650 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); 2651 2652 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) 2653 return (-1); 2654 2655 /* 2656 * If the target is a hot spare that has been swapped in, we can only 2657 * replace it with another hot spare. 2658 */ 2659 if (replacing && 2660 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 && 2661 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache, 2662 NULL) == NULL || !avail_spare) && 2663 is_replacing_spare(config_root, tgt, 1)) { 2664 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2665 "can only be replaced by another hot spare")); 2666 free(newname); 2667 return (zfs_error(hdl, EZFS_BADTARGET, msg)); 2668 } 2669 2670 free(newname); 2671 2672 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0) 2673 return (-1); 2674 2675 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc); 2676 2677 zcmd_free_nvlists(&zc); 2678 2679 if (ret == 0) { 2680 if (rootpool) { 2681 /* 2682 * XXX need a better way to prevent user from 2683 * booting up a half-baked vdev. 2684 */ 2685 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make " 2686 "sure to wait until resilver is done " 2687 "before rebooting.\n")); 2688 (void) fprintf(stderr, "\n"); 2689 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "If " 2690 "you boot from pool '%s', you may need to update\n" 2691 "boot code on newly attached disk '%s'.\n\n" 2692 "Assuming you use GPT partitioning and 'da0' is " 2693 "your new boot disk\n" 2694 "you may use the following command:\n\n" 2695 "\tgpart bootcode -b /boot/pmbr -p " 2696 "/boot/gptzfsboot -i 1 da0\n\n"), 2697 zhp->zpool_name, new_disk); 2698 } 2699 return (0); 2700 } 2701 2702 switch (errno) { 2703 case ENOTSUP: 2704 /* 2705 * Can't attach to or replace this type of vdev. 2706 */ 2707 if (replacing) { 2708 uint64_t version = zpool_get_prop_int(zhp, 2709 ZPOOL_PROP_VERSION, NULL); 2710 2711 if (islog) 2712 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2713 "cannot replace a log with a spare")); 2714 else if (version >= SPA_VERSION_MULTI_REPLACE) 2715 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2716 "already in replacing/spare config; wait " 2717 "for completion or use 'zpool detach'")); 2718 else 2719 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2720 "cannot replace a replacing device")); 2721 } else { 2722 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2723 "can only attach to mirrors and top-level " 2724 "disks")); 2725 } 2726 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2727 break; 2728 2729 case EINVAL: 2730 /* 2731 * The new device must be a single disk. 2732 */ 2733 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2734 "new device must be a single disk")); 2735 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg); 2736 break; 2737 2738 case EBUSY: 2739 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"), 2740 new_disk); 2741 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2742 break; 2743 2744 case EOVERFLOW: 2745 /* 2746 * The new device is too small. 2747 */ 2748 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2749 "device is too small")); 2750 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2751 break; 2752 2753 case EDOM: 2754 /* 2755 * The new device has a different alignment requirement. 2756 */ 2757 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2758 "devices have different sector alignment")); 2759 (void) zfs_error(hdl, EZFS_BADDEV, msg); 2760 break; 2761 2762 case ENAMETOOLONG: 2763 /* 2764 * The resulting top-level vdev spec won't fit in the label. 2765 */ 2766 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg); 2767 break; 2768 2769 default: 2770 (void) zpool_standard_error(hdl, errno, msg); 2771 } 2772 2773 return (-1); 2774} 2775 2776/* 2777 * Detach the specified device. 2778 */ 2779int 2780zpool_vdev_detach(zpool_handle_t *zhp, const char *path) 2781{ 2782 zfs_cmd_t zc = { 0 }; 2783 char msg[1024]; 2784 nvlist_t *tgt; 2785 boolean_t avail_spare, l2cache; 2786 libzfs_handle_t *hdl = zhp->zpool_hdl; 2787 2788 (void) snprintf(msg, sizeof (msg), 2789 dgettext(TEXT_DOMAIN, "cannot detach %s"), path); 2790 2791 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 2792 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 2793 NULL)) == 0) 2794 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 2795 2796 if (avail_spare) 2797 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 2798 2799 if (l2cache) 2800 return (zfs_error(hdl, EZFS_ISL2CACHE, msg)); 2801 2802 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 2803 2804 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0) 2805 return (0); 2806 2807 switch (errno) { 2808 2809 case ENOTSUP: 2810 /* 2811 * Can't detach from this type of vdev. 2812 */ 2813 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only " 2814 "applicable to mirror and replacing vdevs")); 2815 (void) zfs_error(hdl, EZFS_BADTARGET, msg); 2816 break; 2817 2818 case EBUSY: 2819 /* 2820 * There are no other replicas of this device. 2821 */ 2822 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg); 2823 break; 2824 2825 default: 2826 (void) zpool_standard_error(hdl, errno, msg); 2827 } 2828 2829 return (-1); 2830} 2831 2832/* 2833 * Find a mirror vdev in the source nvlist. 2834 * 2835 * The mchild array contains a list of disks in one of the top-level mirrors 2836 * of the source pool. The schild array contains a list of disks that the 2837 * user specified on the command line. We loop over the mchild array to 2838 * see if any entry in the schild array matches. 2839 * 2840 * If a disk in the mchild array is found in the schild array, we return 2841 * the index of that entry. Otherwise we return -1. 2842 */ 2843static int 2844find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, 2845 nvlist_t **schild, uint_t schildren) 2846{ 2847 uint_t mc; 2848 2849 for (mc = 0; mc < mchildren; mc++) { 2850 uint_t sc; 2851 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2852 mchild[mc], B_FALSE); 2853 2854 for (sc = 0; sc < schildren; sc++) { 2855 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, 2856 schild[sc], B_FALSE); 2857 boolean_t result = (strcmp(mpath, spath) == 0); 2858 2859 free(spath); 2860 if (result) { 2861 free(mpath); 2862 return (mc); 2863 } 2864 } 2865 2866 free(mpath); 2867 } 2868 2869 return (-1); 2870} 2871 2872/* 2873 * Split a mirror pool. If newroot points to null, then a new nvlist 2874 * is generated and it is the responsibility of the caller to free it. 2875 */ 2876int 2877zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot, 2878 nvlist_t *props, splitflags_t flags) 2879{ 2880 zfs_cmd_t zc = { 0 }; 2881 char msg[1024]; 2882 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL; 2883 nvlist_t **varray = NULL, *zc_props = NULL; 2884 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0; 2885 libzfs_handle_t *hdl = zhp->zpool_hdl; 2886 uint64_t vers; 2887 boolean_t freelist = B_FALSE, memory_err = B_TRUE; 2888 int retval = 0; 2889 2890 (void) snprintf(msg, sizeof (msg), 2891 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name); 2892 2893 if (!zpool_name_valid(hdl, B_FALSE, newname)) 2894 return (zfs_error(hdl, EZFS_INVALIDNAME, msg)); 2895 2896 if ((config = zpool_get_config(zhp, NULL)) == NULL) { 2897 (void) fprintf(stderr, gettext("Internal error: unable to " 2898 "retrieve pool configuration\n")); 2899 return (-1); 2900 } 2901 2902 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) 2903 == 0); 2904 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0); 2905 2906 if (props) { 2907 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE }; 2908 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name, 2909 props, vers, flags, msg)) == NULL) 2910 return (-1); 2911 } 2912 2913 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child, 2914 &children) != 0) { 2915 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2916 "Source pool is missing vdev tree")); 2917 nvlist_free(zc_props); 2918 return (-1); 2919 } 2920 2921 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *)); 2922 vcount = 0; 2923 2924 if (*newroot == NULL || 2925 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, 2926 &newchild, &newchildren) != 0) 2927 newchildren = 0; 2928 2929 for (c = 0; c < children; c++) { 2930 uint64_t is_log = B_FALSE, is_hole = B_FALSE; 2931 char *type; 2932 nvlist_t **mchild, *vdev; 2933 uint_t mchildren; 2934 int entry; 2935 2936 /* 2937 * Unlike cache & spares, slogs are stored in the 2938 * ZPOOL_CONFIG_CHILDREN array. We filter them out here. 2939 */ 2940 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG, 2941 &is_log); 2942 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 2943 &is_hole); 2944 if (is_log || is_hole) { 2945 /* 2946 * Create a hole vdev and put it in the config. 2947 */ 2948 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0) 2949 goto out; 2950 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE, 2951 VDEV_TYPE_HOLE) != 0) 2952 goto out; 2953 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE, 2954 1) != 0) 2955 goto out; 2956 if (lastlog == 0) 2957 lastlog = vcount; 2958 varray[vcount++] = vdev; 2959 continue; 2960 } 2961 lastlog = 0; 2962 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type) 2963 == 0); 2964 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) { 2965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 2966 "Source pool must be composed only of mirrors\n")); 2967 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2968 goto out; 2969 } 2970 2971 verify(nvlist_lookup_nvlist_array(child[c], 2972 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0); 2973 2974 /* find or add an entry for this top-level vdev */ 2975 if (newchildren > 0 && 2976 (entry = find_vdev_entry(zhp, mchild, mchildren, 2977 newchild, newchildren)) >= 0) { 2978 /* We found a disk that the user specified. */ 2979 vdev = mchild[entry]; 2980 ++found; 2981 } else { 2982 /* User didn't specify a disk for this vdev. */ 2983 vdev = mchild[mchildren - 1]; 2984 } 2985 2986 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0) 2987 goto out; 2988 } 2989 2990 /* did we find every disk the user specified? */ 2991 if (found != newchildren) { 2992 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must " 2993 "include at most one disk from each mirror")); 2994 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg); 2995 goto out; 2996 } 2997 2998 /* Prepare the nvlist for populating. */ 2999 if (*newroot == NULL) { 3000 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0) 3001 goto out; 3002 freelist = B_TRUE; 3003 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE, 3004 VDEV_TYPE_ROOT) != 0) 3005 goto out; 3006 } else { 3007 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0); 3008 } 3009 3010 /* Add all the children we found */ 3011 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray, 3012 lastlog == 0 ? vcount : lastlog) != 0) 3013 goto out; 3014 3015 /* 3016 * If we're just doing a dry run, exit now with success. 3017 */ 3018 if (flags.dryrun) { 3019 memory_err = B_FALSE; 3020 freelist = B_FALSE; 3021 goto out; 3022 } 3023 3024 /* now build up the config list & call the ioctl */ 3025 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0) 3026 goto out; 3027 3028 if (nvlist_add_nvlist(newconfig, 3029 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 || 3030 nvlist_add_string(newconfig, 3031 ZPOOL_CONFIG_POOL_NAME, newname) != 0 || 3032 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0) 3033 goto out; 3034 3035 /* 3036 * The new pool is automatically part of the namespace unless we 3037 * explicitly export it. 3038 */ 3039 if (!flags.import) 3040 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT; 3041 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3042 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string)); 3043 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0) 3044 goto out; 3045 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0) 3046 goto out; 3047 3048 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) { 3049 retval = zpool_standard_error(hdl, errno, msg); 3050 goto out; 3051 } 3052 3053 freelist = B_FALSE; 3054 memory_err = B_FALSE; 3055 3056out: 3057 if (varray != NULL) { 3058 int v; 3059 3060 for (v = 0; v < vcount; v++) 3061 nvlist_free(varray[v]); 3062 free(varray); 3063 } 3064 zcmd_free_nvlists(&zc); 3065 nvlist_free(zc_props); 3066 nvlist_free(newconfig); 3067 if (freelist) { 3068 nvlist_free(*newroot); 3069 *newroot = NULL; 3070 } 3071 3072 if (retval != 0) 3073 return (retval); 3074 3075 if (memory_err) 3076 return (no_memory(hdl)); 3077 3078 return (0); 3079} 3080 3081/* 3082 * Remove the given device. Currently, this is supported only for hot spares 3083 * and level 2 cache devices. 3084 */ 3085int 3086zpool_vdev_remove(zpool_handle_t *zhp, const char *path) 3087{ 3088 zfs_cmd_t zc = { 0 }; 3089 char msg[1024]; 3090 nvlist_t *tgt; 3091 boolean_t avail_spare, l2cache, islog; 3092 libzfs_handle_t *hdl = zhp->zpool_hdl; 3093 uint64_t version; 3094 3095 (void) snprintf(msg, sizeof (msg), 3096 dgettext(TEXT_DOMAIN, "cannot remove %s"), path); 3097 3098 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3099 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache, 3100 &islog)) == 0) 3101 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3102 /* 3103 * XXX - this should just go away. 3104 */ 3105 if (!avail_spare && !l2cache && !islog) { 3106 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3107 "only inactive hot spares, cache, top-level, " 3108 "or log devices can be removed")); 3109 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3110 } 3111 3112 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL); 3113 if (islog && version < SPA_VERSION_HOLES) { 3114 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3115 "pool must be upgrade to support log removal")); 3116 return (zfs_error(hdl, EZFS_BADVERSION, msg)); 3117 } 3118 3119 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0); 3120 3121 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0) 3122 return (0); 3123 3124 return (zpool_standard_error(hdl, errno, msg)); 3125} 3126 3127/* 3128 * Clear the errors for the pool, or the particular device if specified. 3129 */ 3130int 3131zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl) 3132{ 3133 zfs_cmd_t zc = { 0 }; 3134 char msg[1024]; 3135 nvlist_t *tgt; 3136 zpool_rewind_policy_t policy; 3137 boolean_t avail_spare, l2cache; 3138 libzfs_handle_t *hdl = zhp->zpool_hdl; 3139 nvlist_t *nvi = NULL; 3140 int error; 3141 3142 if (path) 3143 (void) snprintf(msg, sizeof (msg), 3144 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3145 path); 3146 else 3147 (void) snprintf(msg, sizeof (msg), 3148 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"), 3149 zhp->zpool_name); 3150 3151 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3152 if (path) { 3153 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, 3154 &l2cache, NULL)) == 0) 3155 return (zfs_error(hdl, EZFS_NODEVICE, msg)); 3156 3157 /* 3158 * Don't allow error clearing for hot spares. Do allow 3159 * error clearing for l2cache devices. 3160 */ 3161 if (avail_spare) 3162 return (zfs_error(hdl, EZFS_ISSPARE, msg)); 3163 3164 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, 3165 &zc.zc_guid) == 0); 3166 } 3167 3168 zpool_get_rewind_policy(rewindnvl, &policy); 3169 zc.zc_cookie = policy.zrp_request; 3170 3171 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0) 3172 return (-1); 3173 3174 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0) 3175 return (-1); 3176 3177 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 && 3178 errno == ENOMEM) { 3179 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) { 3180 zcmd_free_nvlists(&zc); 3181 return (-1); 3182 } 3183 } 3184 3185 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) && 3186 errno != EPERM && errno != EACCES)) { 3187 if (policy.zrp_request & 3188 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) { 3189 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi); 3190 zpool_rewind_exclaim(hdl, zc.zc_name, 3191 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), 3192 nvi); 3193 nvlist_free(nvi); 3194 } 3195 zcmd_free_nvlists(&zc); 3196 return (0); 3197 } 3198 3199 zcmd_free_nvlists(&zc); 3200 return (zpool_standard_error(hdl, errno, msg)); 3201} 3202 3203/* 3204 * Similar to zpool_clear(), but takes a GUID (used by fmd). 3205 */ 3206int 3207zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid) 3208{ 3209 zfs_cmd_t zc = { 0 }; 3210 char msg[1024]; 3211 libzfs_handle_t *hdl = zhp->zpool_hdl; 3212 3213 (void) snprintf(msg, sizeof (msg), 3214 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"), 3215 guid); 3216 3217 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3218 zc.zc_guid = guid; 3219 zc.zc_cookie = ZPOOL_NO_REWIND; 3220 3221 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0) 3222 return (0); 3223 3224 return (zpool_standard_error(hdl, errno, msg)); 3225} 3226 3227/* 3228 * Change the GUID for a pool. 3229 */ 3230int 3231zpool_reguid(zpool_handle_t *zhp) 3232{ 3233 char msg[1024]; 3234 libzfs_handle_t *hdl = zhp->zpool_hdl; 3235 zfs_cmd_t zc = { 0 }; 3236 3237 (void) snprintf(msg, sizeof (msg), 3238 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name); 3239 3240 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3241 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0) 3242 return (0); 3243 3244 return (zpool_standard_error(hdl, errno, msg)); 3245} 3246 3247/* 3248 * Reopen the pool. 3249 */ 3250int 3251zpool_reopen(zpool_handle_t *zhp) 3252{ 3253 zfs_cmd_t zc = { 0 }; 3254 char msg[1024]; 3255 libzfs_handle_t *hdl = zhp->zpool_hdl; 3256 3257 (void) snprintf(msg, sizeof (msg), 3258 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), 3259 zhp->zpool_name); 3260 3261 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3262 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0) 3263 return (0); 3264 return (zpool_standard_error(hdl, errno, msg)); 3265} 3266 3267/* 3268 * Convert from a devid string to a path. 3269 */ 3270static char * 3271devid_to_path(char *devid_str) 3272{ 3273 ddi_devid_t devid; 3274 char *minor; 3275 char *path; 3276 devid_nmlist_t *list = NULL; 3277 int ret; 3278 3279 if (devid_str_decode(devid_str, &devid, &minor) != 0) 3280 return (NULL); 3281 3282 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list); 3283 3284 devid_str_free(minor); 3285 devid_free(devid); 3286 3287 if (ret != 0) 3288 return (NULL); 3289 3290 /* 3291 * In a case the strdup() fails, we will just return NULL below. 3292 */ 3293 path = strdup(list[0].devname); 3294 3295 devid_free_nmlist(list); 3296 3297 return (path); 3298} 3299 3300/* 3301 * Convert from a path to a devid string. 3302 */ 3303static char * 3304path_to_devid(const char *path) 3305{ 3306#ifdef have_devid 3307 int fd; 3308 ddi_devid_t devid; 3309 char *minor, *ret; 3310 3311 if ((fd = open(path, O_RDONLY)) < 0) 3312 return (NULL); 3313 3314 minor = NULL; 3315 ret = NULL; 3316 if (devid_get(fd, &devid) == 0) { 3317 if (devid_get_minor_name(fd, &minor) == 0) 3318 ret = devid_str_encode(devid, minor); 3319 if (minor != NULL) 3320 devid_str_free(minor); 3321 devid_free(devid); 3322 } 3323 (void) close(fd); 3324 3325 return (ret); 3326#else 3327 return (NULL); 3328#endif 3329} 3330 3331/* 3332 * Issue the necessary ioctl() to update the stored path value for the vdev. We 3333 * ignore any failure here, since a common case is for an unprivileged user to 3334 * type 'zpool status', and we'll display the correct information anyway. 3335 */ 3336static void 3337set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path) 3338{ 3339 zfs_cmd_t zc = { 0 }; 3340 3341 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3342 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value)); 3343 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3344 &zc.zc_guid) == 0); 3345 3346 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc); 3347} 3348 3349/* 3350 * Given a vdev, return the name to display in iostat. If the vdev has a path, 3351 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type. 3352 * We also check if this is a whole disk, in which case we strip off the 3353 * trailing 's0' slice name. 3354 * 3355 * This routine is also responsible for identifying when disks have been 3356 * reconfigured in a new location. The kernel will have opened the device by 3357 * devid, but the path will still refer to the old location. To catch this, we 3358 * first do a path -> devid translation (which is fast for the common case). If 3359 * the devid matches, we're done. If not, we do a reverse devid -> path 3360 * translation and issue the appropriate ioctl() to update the path of the vdev. 3361 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any 3362 * of these checks. 3363 */ 3364char * 3365zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, 3366 boolean_t verbose) 3367{ 3368 char *path, *devid; 3369 uint64_t value; 3370 char buf[64]; 3371 vdev_stat_t *vs; 3372 uint_t vsc; 3373 int have_stats; 3374 int have_path; 3375 3376 have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, 3377 (uint64_t **)&vs, &vsc) == 0; 3378 have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0; 3379 3380 /* 3381 * If the device is not currently present, assume it will not 3382 * come back at the same device path. Display the device by GUID. 3383 */ 3384 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 || 3385 have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) { 3386 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, 3387 &value) == 0); 3388 (void) snprintf(buf, sizeof (buf), "%llu", 3389 (u_longlong_t)value); 3390 path = buf; 3391 } else if (have_path) { 3392 3393 /* 3394 * If the device is dead (faulted, offline, etc) then don't 3395 * bother opening it. Otherwise we may be forcing the user to 3396 * open a misbehaving device, which can have undesirable 3397 * effects. 3398 */ 3399 if ((have_stats == 0 || 3400 vs->vs_state >= VDEV_STATE_DEGRADED) && 3401 zhp != NULL && 3402 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) { 3403 /* 3404 * Determine if the current path is correct. 3405 */ 3406 char *newdevid = path_to_devid(path); 3407 3408 if (newdevid == NULL || 3409 strcmp(devid, newdevid) != 0) { 3410 char *newpath; 3411 3412 if ((newpath = devid_to_path(devid)) != NULL) { 3413 /* 3414 * Update the path appropriately. 3415 */ 3416 set_path(zhp, nv, newpath); 3417 if (nvlist_add_string(nv, 3418 ZPOOL_CONFIG_PATH, newpath) == 0) 3419 verify(nvlist_lookup_string(nv, 3420 ZPOOL_CONFIG_PATH, 3421 &path) == 0); 3422 free(newpath); 3423 } 3424 } 3425 3426 if (newdevid) 3427 devid_str_free(newdevid); 3428 } 3429 3430#ifdef illumos 3431 if (strncmp(path, ZFS_DISK_ROOTD, strlen(ZFS_DISK_ROOTD)) == 0) 3432 path += strlen(ZFS_DISK_ROOTD); 3433 3434 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, 3435 &value) == 0 && value) { 3436 int pathlen = strlen(path); 3437 char *tmp = zfs_strdup(hdl, path); 3438 3439 /* 3440 * If it starts with c#, and ends with "s0", chop 3441 * the "s0" off, or if it ends with "s0/old", remove 3442 * the "s0" from the middle. 3443 */ 3444 if (CTD_CHECK(tmp)) { 3445 if (strcmp(&tmp[pathlen - 2], "s0") == 0) { 3446 tmp[pathlen - 2] = '\0'; 3447 } else if (pathlen > 6 && 3448 strcmp(&tmp[pathlen - 6], "s0/old") == 0) { 3449 (void) strcpy(&tmp[pathlen - 6], 3450 "/old"); 3451 } 3452 } 3453 return (tmp); 3454 } 3455#else /* !illumos */ 3456 if (strncmp(path, _PATH_DEV, sizeof(_PATH_DEV) - 1) == 0) 3457 path += sizeof(_PATH_DEV) - 1; 3458#endif /* illumos */ 3459 } else { 3460 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0); 3461 3462 /* 3463 * If it's a raidz device, we need to stick in the parity level. 3464 */ 3465 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) { 3466 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY, 3467 &value) == 0); 3468 (void) snprintf(buf, sizeof (buf), "%s%llu", path, 3469 (u_longlong_t)value); 3470 path = buf; 3471 } 3472 3473 /* 3474 * We identify each top-level vdev by using a <type-id> 3475 * naming convention. 3476 */ 3477 if (verbose) { 3478 uint64_t id; 3479 3480 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, 3481 &id) == 0); 3482 (void) snprintf(buf, sizeof (buf), "%s-%llu", path, 3483 (u_longlong_t)id); 3484 path = buf; 3485 } 3486 } 3487 3488 return (zfs_strdup(hdl, path)); 3489} 3490 3491static int 3492zbookmark_mem_compare(const void *a, const void *b) 3493{ 3494 return (memcmp(a, b, sizeof (zbookmark_phys_t))); 3495} 3496 3497/* 3498 * Retrieve the persistent error log, uniquify the members, and return to the 3499 * caller. 3500 */ 3501int 3502zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp) 3503{ 3504 zfs_cmd_t zc = { 0 }; 3505 uint64_t count; 3506 zbookmark_phys_t *zb = NULL; 3507 int i; 3508 3509 /* 3510 * Retrieve the raw error list from the kernel. If the number of errors 3511 * has increased, allocate more space and continue until we get the 3512 * entire list. 3513 */ 3514 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT, 3515 &count) == 0); 3516 if (count == 0) 3517 return (0); 3518 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl, 3519 count * sizeof (zbookmark_phys_t))) == (uintptr_t)NULL) 3520 return (-1); 3521 zc.zc_nvlist_dst_size = count; 3522 (void) strcpy(zc.zc_name, zhp->zpool_name); 3523 for (;;) { 3524 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG, 3525 &zc) != 0) { 3526 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3527 if (errno == ENOMEM) { 3528 void *dst; 3529 3530 count = zc.zc_nvlist_dst_size; 3531 dst = zfs_alloc(zhp->zpool_hdl, count * 3532 sizeof (zbookmark_phys_t)); 3533 if (dst == NULL) 3534 return (-1); 3535 zc.zc_nvlist_dst = (uintptr_t)dst; 3536 } else { 3537 return (-1); 3538 } 3539 } else { 3540 break; 3541 } 3542 } 3543 3544 /* 3545 * Sort the resulting bookmarks. This is a little confusing due to the 3546 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last 3547 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks 3548 * _not_ copied as part of the process. So we point the start of our 3549 * array appropriate and decrement the total number of elements. 3550 */ 3551 zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) + 3552 zc.zc_nvlist_dst_size; 3553 count -= zc.zc_nvlist_dst_size; 3554 3555 qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare); 3556 3557 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0); 3558 3559 /* 3560 * Fill in the nverrlistp with nvlist's of dataset and object numbers. 3561 */ 3562 for (i = 0; i < count; i++) { 3563 nvlist_t *nv; 3564 3565 /* ignoring zb_blkid and zb_level for now */ 3566 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset && 3567 zb[i-1].zb_object == zb[i].zb_object) 3568 continue; 3569 3570 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0) 3571 goto nomem; 3572 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET, 3573 zb[i].zb_objset) != 0) { 3574 nvlist_free(nv); 3575 goto nomem; 3576 } 3577 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT, 3578 zb[i].zb_object) != 0) { 3579 nvlist_free(nv); 3580 goto nomem; 3581 } 3582 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) { 3583 nvlist_free(nv); 3584 goto nomem; 3585 } 3586 nvlist_free(nv); 3587 } 3588 3589 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3590 return (0); 3591 3592nomem: 3593 free((void *)(uintptr_t)zc.zc_nvlist_dst); 3594 return (no_memory(zhp->zpool_hdl)); 3595} 3596 3597/* 3598 * Upgrade a ZFS pool to the latest on-disk version. 3599 */ 3600int 3601zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version) 3602{ 3603 zfs_cmd_t zc = { 0 }; 3604 libzfs_handle_t *hdl = zhp->zpool_hdl; 3605 3606 (void) strcpy(zc.zc_name, zhp->zpool_name); 3607 zc.zc_cookie = new_version; 3608 3609 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0) 3610 return (zpool_standard_error_fmt(hdl, errno, 3611 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"), 3612 zhp->zpool_name)); 3613 return (0); 3614} 3615 3616void 3617zfs_save_arguments(int argc, char **argv, char *string, int len) 3618{ 3619 (void) strlcpy(string, basename(argv[0]), len); 3620 for (int i = 1; i < argc; i++) { 3621 (void) strlcat(string, " ", len); 3622 (void) strlcat(string, argv[i], len); 3623 } 3624} 3625 3626int 3627zpool_log_history(libzfs_handle_t *hdl, const char *message) 3628{ 3629 zfs_cmd_t zc = { 0 }; 3630 nvlist_t *args; 3631 int err; 3632 3633 args = fnvlist_alloc(); 3634 fnvlist_add_string(args, "message", message); 3635 err = zcmd_write_src_nvlist(hdl, &zc, args); 3636 if (err == 0) 3637 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc); 3638 nvlist_free(args); 3639 zcmd_free_nvlists(&zc); 3640 return (err); 3641} 3642 3643/* 3644 * Perform ioctl to get some command history of a pool. 3645 * 3646 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the 3647 * logical offset of the history buffer to start reading from. 3648 * 3649 * Upon return, 'off' is the next logical offset to read from and 3650 * 'len' is the actual amount of bytes read into 'buf'. 3651 */ 3652static int 3653get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len) 3654{ 3655 zfs_cmd_t zc = { 0 }; 3656 libzfs_handle_t *hdl = zhp->zpool_hdl; 3657 3658 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3659 3660 zc.zc_history = (uint64_t)(uintptr_t)buf; 3661 zc.zc_history_len = *len; 3662 zc.zc_history_offset = *off; 3663 3664 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) { 3665 switch (errno) { 3666 case EPERM: 3667 return (zfs_error_fmt(hdl, EZFS_PERM, 3668 dgettext(TEXT_DOMAIN, 3669 "cannot show history for pool '%s'"), 3670 zhp->zpool_name)); 3671 case ENOENT: 3672 return (zfs_error_fmt(hdl, EZFS_NOHISTORY, 3673 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3674 "'%s'"), zhp->zpool_name)); 3675 case ENOTSUP: 3676 return (zfs_error_fmt(hdl, EZFS_BADVERSION, 3677 dgettext(TEXT_DOMAIN, "cannot get history for pool " 3678 "'%s', pool must be upgraded"), zhp->zpool_name)); 3679 default: 3680 return (zpool_standard_error_fmt(hdl, errno, 3681 dgettext(TEXT_DOMAIN, 3682 "cannot get history for '%s'"), zhp->zpool_name)); 3683 } 3684 } 3685 3686 *len = zc.zc_history_len; 3687 *off = zc.zc_history_offset; 3688 3689 return (0); 3690} 3691 3692/* 3693 * Process the buffer of nvlists, unpacking and storing each nvlist record 3694 * into 'records'. 'leftover' is set to the number of bytes that weren't 3695 * processed as there wasn't a complete record. 3696 */ 3697int 3698zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover, 3699 nvlist_t ***records, uint_t *numrecords) 3700{ 3701 uint64_t reclen; 3702 nvlist_t *nv; 3703 int i; 3704 3705 while (bytes_read > sizeof (reclen)) { 3706 3707 /* get length of packed record (stored as little endian) */ 3708 for (i = 0, reclen = 0; i < sizeof (reclen); i++) 3709 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i); 3710 3711 if (bytes_read < sizeof (reclen) + reclen) 3712 break; 3713 3714 /* unpack record */ 3715 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0) 3716 return (ENOMEM); 3717 bytes_read -= sizeof (reclen) + reclen; 3718 buf += sizeof (reclen) + reclen; 3719 3720 /* add record to nvlist array */ 3721 (*numrecords)++; 3722 if (ISP2(*numrecords + 1)) { 3723 *records = realloc(*records, 3724 *numrecords * 2 * sizeof (nvlist_t *)); 3725 } 3726 (*records)[*numrecords - 1] = nv; 3727 } 3728 3729 *leftover = bytes_read; 3730 return (0); 3731} 3732 3733/* from spa_history.c: spa_history_create_obj() */ 3734#define HIS_BUF_LEN_DEF (128 << 10) 3735#define HIS_BUF_LEN_MAX (1 << 30) 3736 3737/* 3738 * Retrieve the command history of a pool. 3739 */ 3740int 3741zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp) 3742{ 3743 char *buf; 3744 uint64_t buflen = HIS_BUF_LEN_DEF; 3745 uint64_t off = 0; 3746 nvlist_t **records = NULL; 3747 uint_t numrecords = 0; 3748 int err, i; 3749 3750 buf = malloc(buflen); 3751 if (buf == NULL) 3752 return (ENOMEM); 3753 do { 3754 uint64_t bytes_read = buflen; 3755 uint64_t leftover; 3756 3757 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0) 3758 break; 3759 3760 /* if nothing else was read in, we're at EOF, just return */ 3761 if (bytes_read == 0) 3762 break; 3763 3764 if ((err = zpool_history_unpack(buf, bytes_read, 3765 &leftover, &records, &numrecords)) != 0) 3766 break; 3767 off -= leftover; 3768 if (leftover == bytes_read) { 3769 /* 3770 * no progress made, because buffer is not big enough 3771 * to hold this record; resize and retry. 3772 */ 3773 buflen *= 2; 3774 free(buf); 3775 buf = NULL; 3776 if ((buflen >= HIS_BUF_LEN_MAX) || 3777 ((buf = malloc(buflen)) == NULL)) { 3778 err = ENOMEM; 3779 break; 3780 } 3781 } 3782 3783 /* CONSTCOND */ 3784 } while (1); 3785 3786 free(buf); 3787 3788 if (!err) { 3789 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0); 3790 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD, 3791 records, numrecords) == 0); 3792 } 3793 for (i = 0; i < numrecords; i++) 3794 nvlist_free(records[i]); 3795 free(records); 3796 3797 return (err); 3798} 3799 3800void 3801zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj, 3802 char *pathname, size_t len) 3803{ 3804 zfs_cmd_t zc = { 0 }; 3805 boolean_t mounted = B_FALSE; 3806 char *mntpnt = NULL; 3807 char dsname[ZFS_MAX_DATASET_NAME_LEN]; 3808 3809 if (dsobj == 0) { 3810 /* special case for the MOS */ 3811 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj); 3812 return; 3813 } 3814 3815 /* get the dataset's name */ 3816 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name)); 3817 zc.zc_obj = dsobj; 3818 if (ioctl(zhp->zpool_hdl->libzfs_fd, 3819 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) { 3820 /* just write out a path of two object numbers */ 3821 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>", 3822 dsobj, obj); 3823 return; 3824 } 3825 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname)); 3826 3827 /* find out if the dataset is mounted */ 3828 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt); 3829 3830 /* get the corrupted object's path */ 3831 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name)); 3832 zc.zc_obj = obj; 3833 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH, 3834 &zc) == 0) { 3835 if (mounted) { 3836 (void) snprintf(pathname, len, "%s%s", mntpnt, 3837 zc.zc_value); 3838 } else { 3839 (void) snprintf(pathname, len, "%s:%s", 3840 dsname, zc.zc_value); 3841 } 3842 } else { 3843 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj); 3844 } 3845 free(mntpnt); 3846} 3847 3848#ifdef illumos 3849/* 3850 * Read the EFI label from the config, if a label does not exist then 3851 * pass back the error to the caller. If the caller has passed a non-NULL 3852 * diskaddr argument then we set it to the starting address of the EFI 3853 * partition. 3854 */ 3855static int 3856read_efi_label(nvlist_t *config, diskaddr_t *sb) 3857{ 3858 char *path; 3859 int fd; 3860 char diskname[MAXPATHLEN]; 3861 int err = -1; 3862 3863 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0) 3864 return (err); 3865 3866 (void) snprintf(diskname, sizeof (diskname), "%s%s", ZFS_RDISK_ROOT, 3867 strrchr(path, '/')); 3868 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) { 3869 struct dk_gpt *vtoc; 3870 3871 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) { 3872 if (sb != NULL) 3873 *sb = vtoc->efi_parts[0].p_start; 3874 efi_free(vtoc); 3875 } 3876 (void) close(fd); 3877 } 3878 return (err); 3879} 3880 3881/* 3882 * determine where a partition starts on a disk in the current 3883 * configuration 3884 */ 3885static diskaddr_t 3886find_start_block(nvlist_t *config) 3887{ 3888 nvlist_t **child; 3889 uint_t c, children; 3890 diskaddr_t sb = MAXOFFSET_T; 3891 uint64_t wholedisk; 3892 3893 if (nvlist_lookup_nvlist_array(config, 3894 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) { 3895 if (nvlist_lookup_uint64(config, 3896 ZPOOL_CONFIG_WHOLE_DISK, 3897 &wholedisk) != 0 || !wholedisk) { 3898 return (MAXOFFSET_T); 3899 } 3900 if (read_efi_label(config, &sb) < 0) 3901 sb = MAXOFFSET_T; 3902 return (sb); 3903 } 3904 3905 for (c = 0; c < children; c++) { 3906 sb = find_start_block(child[c]); 3907 if (sb != MAXOFFSET_T) { 3908 return (sb); 3909 } 3910 } 3911 return (MAXOFFSET_T); 3912} 3913#endif /* illumos */ 3914 3915/* 3916 * Label an individual disk. The name provided is the short name, 3917 * stripped of any leading /dev path. 3918 */ 3919int 3920zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, const char *name) 3921{ 3922#ifdef illumos 3923 char path[MAXPATHLEN]; 3924 struct dk_gpt *vtoc; 3925 int fd; 3926 size_t resv = EFI_MIN_RESV_SIZE; 3927 uint64_t slice_size; 3928 diskaddr_t start_block; 3929 char errbuf[1024]; 3930 3931 /* prepare an error message just in case */ 3932 (void) snprintf(errbuf, sizeof (errbuf), 3933 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name); 3934 3935 if (zhp) { 3936 nvlist_t *nvroot; 3937 3938 verify(nvlist_lookup_nvlist(zhp->zpool_config, 3939 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3940 3941 if (zhp->zpool_start_block == 0) 3942 start_block = find_start_block(nvroot); 3943 else 3944 start_block = zhp->zpool_start_block; 3945 zhp->zpool_start_block = start_block; 3946 } else { 3947 /* new pool */ 3948 start_block = NEW_START_BLOCK; 3949 } 3950 3951 (void) snprintf(path, sizeof (path), "%s/%s%s", ZFS_RDISK_ROOT, name, 3952 BACKUP_SLICE); 3953 3954 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) { 3955 /* 3956 * This shouldn't happen. We've long since verified that this 3957 * is a valid device. 3958 */ 3959 zfs_error_aux(hdl, 3960 dgettext(TEXT_DOMAIN, "unable to open device")); 3961 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf)); 3962 } 3963 3964 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) { 3965 /* 3966 * The only way this can fail is if we run out of memory, or we 3967 * were unable to read the disk's capacity 3968 */ 3969 if (errno == ENOMEM) 3970 (void) no_memory(hdl); 3971 3972 (void) close(fd); 3973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 3974 "unable to read disk capacity"), name); 3975 3976 return (zfs_error(hdl, EZFS_NOCAP, errbuf)); 3977 } 3978 3979 slice_size = vtoc->efi_last_u_lba + 1; 3980 slice_size -= EFI_MIN_RESV_SIZE; 3981 if (start_block == MAXOFFSET_T) 3982 start_block = NEW_START_BLOCK; 3983 slice_size -= start_block; 3984 3985 vtoc->efi_parts[0].p_start = start_block; 3986 vtoc->efi_parts[0].p_size = slice_size; 3987 3988 /* 3989 * Why we use V_USR: V_BACKUP confuses users, and is considered 3990 * disposable by some EFI utilities (since EFI doesn't have a backup 3991 * slice). V_UNASSIGNED is supposed to be used only for zero size 3992 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT, 3993 * etc. were all pretty specific. V_USR is as close to reality as we 3994 * can get, in the absence of V_OTHER. 3995 */ 3996 vtoc->efi_parts[0].p_tag = V_USR; 3997 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs"); 3998 3999 vtoc->efi_parts[8].p_start = slice_size + start_block; 4000 vtoc->efi_parts[8].p_size = resv; 4001 vtoc->efi_parts[8].p_tag = V_RESERVED; 4002 4003 if (efi_write(fd, vtoc) != 0) { 4004 /* 4005 * Some block drivers (like pcata) may not support EFI 4006 * GPT labels. Print out a helpful error message dir- 4007 * ecting the user to manually label the disk and give 4008 * a specific slice. 4009 */ 4010 (void) close(fd); 4011 efi_free(vtoc); 4012 4013 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4014 "try using fdisk(1M) and then provide a specific slice")); 4015 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf)); 4016 } 4017 4018 (void) close(fd); 4019 efi_free(vtoc); 4020#endif /* illumos */ 4021 return (0); 4022} 4023 4024static boolean_t 4025supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf) 4026{ 4027 char *type; 4028 nvlist_t **child; 4029 uint_t children, c; 4030 4031 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0); 4032 if (strcmp(type, VDEV_TYPE_FILE) == 0 || 4033 strcmp(type, VDEV_TYPE_HOLE) == 0 || 4034 strcmp(type, VDEV_TYPE_MISSING) == 0) { 4035 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4036 "vdev type '%s' is not supported"), type); 4037 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf); 4038 return (B_FALSE); 4039 } 4040 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN, 4041 &child, &children) == 0) { 4042 for (c = 0; c < children; c++) { 4043 if (!supported_dump_vdev_type(hdl, child[c], errbuf)) 4044 return (B_FALSE); 4045 } 4046 } 4047 return (B_TRUE); 4048} 4049 4050/* 4051 * Check if this zvol is allowable for use as a dump device; zero if 4052 * it is, > 0 if it isn't, < 0 if it isn't a zvol. 4053 * 4054 * Allowable storage configurations include mirrors, all raidz variants, and 4055 * pools with log, cache, and spare devices. Pools which are backed by files or 4056 * have missing/hole vdevs are not suitable. 4057 */ 4058int 4059zvol_check_dump_config(char *arg) 4060{ 4061 zpool_handle_t *zhp = NULL; 4062 nvlist_t *config, *nvroot; 4063 char *p, *volname; 4064 nvlist_t **top; 4065 uint_t toplevels; 4066 libzfs_handle_t *hdl; 4067 char errbuf[1024]; 4068 char poolname[ZFS_MAX_DATASET_NAME_LEN]; 4069 int pathlen = strlen(ZVOL_FULL_DEV_DIR); 4070 int ret = 1; 4071 4072 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) { 4073 return (-1); 4074 } 4075 4076 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN, 4077 "dump is not supported on device '%s'"), arg); 4078 4079 if ((hdl = libzfs_init()) == NULL) 4080 return (1); 4081 libzfs_print_on_error(hdl, B_TRUE); 4082 4083 volname = arg + pathlen; 4084 4085 /* check the configuration of the pool */ 4086 if ((p = strchr(volname, '/')) == NULL) { 4087 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4088 "malformed dataset name")); 4089 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf); 4090 return (1); 4091 } else if (p - volname >= ZFS_MAX_DATASET_NAME_LEN) { 4092 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4093 "dataset name is too long")); 4094 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf); 4095 return (1); 4096 } else { 4097 (void) strncpy(poolname, volname, p - volname); 4098 poolname[p - volname] = '\0'; 4099 } 4100 4101 if ((zhp = zpool_open(hdl, poolname)) == NULL) { 4102 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4103 "could not open pool '%s'"), poolname); 4104 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf); 4105 goto out; 4106 } 4107 config = zpool_get_config(zhp, NULL); 4108 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4109 &nvroot) != 0) { 4110 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, 4111 "could not obtain vdev configuration for '%s'"), poolname); 4112 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf); 4113 goto out; 4114 } 4115 4116 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4117 &top, &toplevels) == 0); 4118 4119 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) { 4120 goto out; 4121 } 4122 ret = 0; 4123 4124out: 4125 if (zhp) 4126 zpool_close(zhp); 4127 libzfs_fini(hdl); 4128 return (ret); 4129} 4130 4131int 4132zpool_nextboot(libzfs_handle_t *hdl, uint64_t pool_guid, uint64_t dev_guid, 4133 const char *command) 4134{ 4135 zfs_cmd_t zc = { 0 }; 4136 nvlist_t *args; 4137 char *packed; 4138 size_t size; 4139 int error; 4140 4141 args = fnvlist_alloc(); 4142 fnvlist_add_uint64(args, ZPOOL_CONFIG_POOL_GUID, pool_guid); 4143 fnvlist_add_uint64(args, ZPOOL_CONFIG_GUID, dev_guid); 4144 fnvlist_add_string(args, "command", command); 4145 error = zcmd_write_src_nvlist(hdl, &zc, args); 4146 if (error == 0) 4147 error = ioctl(hdl->libzfs_fd, ZFS_IOC_NEXTBOOT, &zc); 4148 zcmd_free_nvlists(&zc); 4149 nvlist_free(args); 4150 return (error); 4151} 4152