spa.c revision 260750
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zvol.h>
#include <sys/trim_map.h>

#ifdef	_KERNEL
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/* Check hostid on import? */
static int check_hostid = 1;

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
    "Check hostid on import?");
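
/*
 * For illustration: the tunable above can be set at boot via loader.conf
 * or at runtime, e.g.:
 *
 *	# sysctl vfs.zfs.check_hostid=0
 *
 * Disabling it skips the hostid check when importing a pool that was last
 * written by another system.
 */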

/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,			/* value is # of threads (min 1) */
	ZTI_MODE_ONLINE_PERCENT,	/* value is % of online CPUs */
	ZTI_MODE_BATCH,			/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,			/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_PCT(n)	{ ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type. When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq. Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros. Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput. Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention. The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per-
 * taskq and the number of taskqs; when dispatching an event in this case,
 * the particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_BATCH,	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};

static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 100;	/* 1 thread per cpu in pset */
#ifdef PSRSET_BIND
id_t		zio_taskq_psrset_bind = PS_NONE;
#endif
#ifdef SYSDC
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
#endif
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

#ifndef illumos
extern void spa_deadman(void *arg);
#endif
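
/*
 * For illustration: the FREE/ISSUE entry above, ZTI_P(12, 8), expands to
 * { ZTI_MODE_FIXED, 12, 8 }: eight discrete taskqs of twelve threads each,
 * with individual dispatches spread across the eight queues.  A ZTI_BATCH
 * entry instead creates a single taskq whose thread count is a percentage
 * of the online CPUs (zio_taskq_batch_pct, above).
 */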

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}

/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size;
	uint64_t alloc;
	uint64_t space;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		space = 0;
		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			space += tvd->vdev_max_asize - tvd->vdev_asize;
		}
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
		    src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		dsl_dir_t *freedir = pool->dp_free_dir;

		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools before this version, freedir will be
		 * NULL.
		 */
		if (freedir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    freedir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}
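
/*
 * A minimal sketch of the caller contract for spa_prop_get(): the function
 * allocates *nvp itself, so the caller passes an unset pointer and frees
 * the result.  (Hypothetical caller, for illustration only.)
 *
 *	nvlist_t *props = NULL;
 *	if (spa_prop_get(spa, &props) == 0) {
 *		... inspect props ...
 *		nvlist_free(props);
 *	}
 */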

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed. This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked). We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check.  For this kernel check, we merely
				 * check ASCII apart from DEL.  Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver, 6);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}

/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}
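
/*
 * For illustration: spa_change_guid() is the in-kernel half of
 * "zpool reguid".  The new GUID is generated under spa_namespace_lock and
 * committed by the check/sync pair above in a single txg, so after a crash
 * the pool should carry either the old GUID or the new one, never a mix.
 */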

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		switch (mode) {
		case ZTI_MODE_FIXED:
			ASSERT3U(value, >=, 1);
			value = MAX(value, 1);
			break;

		case ZTI_MODE_BATCH:
			batch = B_TRUE;
			flags |= TASKQ_THREADS_CPU_PCT;
			value = zio_taskq_batch_pct;
			break;

		case ZTI_MODE_ONLINE_PERCENT:
			flags |= TASKQ_THREADS_CPU_PCT;
			break;

		default:
			panic("unrecognized mode for %s_%s taskq (%u:%u) in "
			    "spa_activate()",
			    zio_type_name[t], zio_taskq_types[q], mode, value);
			break;
		}

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

#ifdef SYSDC
		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
#endif
			tq = taskq_create_proc(name, value, maxclsyspri, 50,
			    INT_MAX, spa->spa_proc, flags);
#ifdef SYSDC
		}
#endif

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}
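
/*
 * For illustration: assuming the zio_type_name[] entries follow the usual
 * "zio_<type>" convention, spa_taskqs_init() for ZIO_TYPE_FREE /
 * ZIO_TASKQ_ISSUE (ZTI_P(12, 8) in the table above) creates eight taskqs
 * named "zio_free_issue_0" through "zio_free_issue_7", twelve threads each;
 * a count of one omits the numeric suffix (e.g. "zio_write_issue_high").
 */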

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself. In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}

static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
#ifdef SPA_PROCESS
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

#ifdef PSRSET_BIND
	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}
#endif

#ifdef SYSDC
	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}
#endif

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif	/* SPA_PROCESS */
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef SPA_PROCESS
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif	/* SPA_PROCESS */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	ASSERT(spa->spa_proc == &p0);
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	/*
	 * Start TRIM thread.
	 */
	trim_thread_create(spa);

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);

	/*
	 * Stop TRIM thread in case spa_unload() wasn't called directly
	 * before spa_deactivate().
	 */
	trim_thread_destroy(spa);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues. Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

#ifdef SPA_PROCESS
	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
#endif	/* SPA_PROCESS */
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately. This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state. This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}

/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop TRIM thread.
	 */
	trim_thread_destroy(spa);

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;

	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process. For each spare, there are potentially two different vdev_t
	 * structures associated with it: one in the list of spares (used only
	 * for basic validation purposes) and one in the active vdev
	 * configuration (if it's spared in). During this phase we open and
	 * validate each vdev on the spare list. If the vdev also exists in the
	 * active configuration, then we also mark this vdev as an active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev. Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise). Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool. When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
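
/*
 * A minimal sketch of how load_nvlist() is typically used: the object is a
 * packed-nvlist object whose bonus buffer holds the packed size, and the
 * caller owns the unpacked result.  (Hypothetical caller, for illustration.)
 *
 *	nvlist_t *nv;
 *	if (load_nvlist(spa, spa->spa_config_object, &nv) == 0) {
 *		... use nv ...
 *		nvlist_free(nv);
 *	}
 */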

/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing devices in this config.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			vdev_t *mtvd = mrvd->vdev_child[c];

			if (tvd->vdev_ops == &vdev_missing_ops &&
			    mtvd->vdev_ops != &vdev_missing_ops &&
			    mtvd->vdev_islog)
				child[idx++] = vdev_config_generate(spa, mtvd,
				    B_FALSE, 0);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

			for (int i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));
	}

	/*
	 * Compare the root vdev tree with the information we have
	 * from the MOS config (mrvd). Check each top-level vdev
	 * with the corresponding MOS config top-level (mtvd).
	 */
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		vdev_t *mtvd = mrvd->vdev_child[c];

		/*
		 * Resolve any "missing" vdevs in the current configuration.
		 * If we find that the MOS config has more accurate information
		 * about the top-level vdev then use that vdev instead.
		 */
		if (tvd->vdev_ops == &vdev_missing_ops &&
		    mtvd->vdev_ops != &vdev_missing_ops) {

			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
				continue;

			/*
			 * Device specific actions.
			 */
			if (mtvd->vdev_islog) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
			} else {
				/*
				 * XXX - once we have 'readonly' pool
				 * support we should be able to handle
				 * missing data devices by transitioning
				 * the pool to readonly.
				 */
				continue;
			}

			/*
			 * Swap the missing vdev with the data we were
			 * able to obtain from the MOS config.
			 */
			vdev_remove_child(rvd, tvd);
			vdev_remove_child(mrvd, mtvd);

			vdev_add_child(rvd, mtvd);
			vdev_add_child(mrvd, tvd);

			spa_config_exit(spa, SCL_ALL, FTAG);
			vdev_load(mtvd);
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

			vdev_reopen(rvd);
		} else if (mtvd->vdev_islog) {
			/*
			 * Load the slog device's state from the MOS config
			 * since it's possible that the label does not
			 * contain the most up-to-date information.
			 */
			vdev_load_log_state(tvd, mtvd);
			vdev_reopen(tvd);
		}
	}
	vdev_free(mrvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Ensure we were able to validate the config.
	 */
	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
		    NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_offline_log(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	int i;

	for (i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_add_64(&sle->sle_meta_count, 1);
		else
			atomic_add_64(&sle->sle_data_count, 1);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);
}

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (bp != NULL) {
		zio_t *rio = arg;
		size_t size = BP_GET_PSIZE(bp);
		void *data = zio_data_buf_alloc(size);

		zio_nowait(zio_read(rio, spa, bp, data, size,
		    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
		    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
		    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	}
	return (0);
}

static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error;

	zpool_get_rewind_policy(spa->spa_config, &policy);

	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
		return (0);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	error = traverse_pool(spa, spa->spa_verify_min_txg,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH, spa_load_verify_cb, rio);

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}
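
/*
 * Note on the policy limits above: zrp_maxmeta and zrp_maxdata come from the
 * rewind policy nvlist parsed by zpool_get_rewind_policy(), typically set by
 * "zpool import -F"-style recovery requests.  They bound how many metadata
 * and data block errors a candidate txg may show before the rewind attempt
 * is rejected; ZPOOL_NEVER_REWIND skips the verification entirely.
 */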

/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, sizeof (uint64_t), 1, val));
}

static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
	return (err);
}

/*
 * Fix up config after a partly-completed split. This is done with the
 * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
 * pool have that entry in their config, but only the splitting one contains
 * a list of all the guids of the vdevs that are being split off.
 *
 * This function determines what to do with that list: either rejoin
 * all the disks to the pool, or complete the splitting process. To attempt
 * the rejoin, each disk that is offlined is marked online again, and
 * we do a reopen() call. If the vdev label for every disk that was
 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
 * then we call vdev_split() on each disk, and complete the split.
 *
 * Otherwise we leave the config alone, with all the vdevs in place in
 * the original pool.
 */
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
	uint_t extracted;
	uint64_t *glist;
	uint_t i, gcount;
	nvlist_t *nvl;
	vdev_t **vd;
	boolean_t attempt_reopen;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
		return;

	/* check that the config is complete */
	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    &glist, &gcount) != 0)
		return;

	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);

	/* attempt to online all the vdevs & validate */
	attempt_reopen = B_TRUE;
	for (i = 0; i < gcount; i++) {
		if (glist[i] == 0)	/* vdev is hole */
			continue;

		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
		if (vd[i] == NULL) {
			/*
			 * Don't bother attempting to reopen the disks;
			 * just do the split.
			 */
			attempt_reopen = B_FALSE;
		} else {
			/* attempt to re-online it */
			vd[i]->vdev_offline = B_FALSE;
		}
	}

	if (attempt_reopen) {
		vdev_reopen(spa->spa_root_vdev);

		/* check each device to see what state it's in */
		for (extracted = 0, i = 0; i < gcount; i++) {
			if (vd[i] != NULL &&
			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
				break;
			++extracted;
		}
	}

	/*
	 * If every disk has been moved to the new pool, or if we never
	 * even attempted to look at them, then we split them off for
	 * good.
	 */
2040 */ 2041 if (!attempt_reopen || gcount == extracted) { 2042 for (i = 0; i < gcount; i++) 2043 if (vd[i] != NULL) 2044 vdev_split(vd[i]); 2045 vdev_reopen(spa->spa_root_vdev); 2046 } 2047 2048 kmem_free(vd, gcount * sizeof (vdev_t *)); 2049} 2050 2051static int 2052spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 2053 boolean_t mosconfig) 2054{ 2055 nvlist_t *config = spa->spa_config; 2056 char *ereport = FM_EREPORT_ZFS_POOL; 2057 char *comment; 2058 int error; 2059 uint64_t pool_guid; 2060 nvlist_t *nvl; 2061 2062 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2063 return (SET_ERROR(EINVAL)); 2064 2065 ASSERT(spa->spa_comment == NULL); 2066 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2067 spa->spa_comment = spa_strdup(comment); 2068 2069 /* 2070 * Versioning wasn't explicitly added to the label until later, so if 2071 * it's not present treat it as the initial version. 2072 */ 2073 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2074 &spa->spa_ubsync.ub_version) != 0) 2075 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2076 2077 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2078 &spa->spa_config_txg); 2079 2080 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2081 spa_guid_exists(pool_guid, 0)) { 2082 error = SET_ERROR(EEXIST); 2083 } else { 2084 spa->spa_config_guid = pool_guid; 2085 2086 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2087 &nvl) == 0) { 2088 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2089 KM_SLEEP) == 0); 2090 } 2091 2092 nvlist_free(spa->spa_load_info); 2093 spa->spa_load_info = fnvlist_alloc(); 2094 2095 gethrestime(&spa->spa_loaded_ts); 2096 error = spa_load_impl(spa, pool_guid, config, state, type, 2097 mosconfig, &ereport); 2098 } 2099 2100 spa->spa_minref = refcount_count(&spa->spa_refcount); 2101 if (error) { 2102 if (error != EEXIST) { 2103 spa->spa_loaded_ts.tv_sec = 0; 2104 spa->spa_loaded_ts.tv_nsec = 0; 2105 } 2106 if (error != EBADF) { 2107 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2108 } 2109 } 2110 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2111 spa->spa_ena = 0; 2112 2113 return (error); 2114} 2115 2116/* 2117 * Load an existing storage pool, using the pool's builtin spa_config as a 2118 * source of configuration information. 2119 */ 2120static int 2121spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2122 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2123 char **ereport) 2124{ 2125 int error = 0; 2126 nvlist_t *nvroot = NULL; 2127 nvlist_t *label; 2128 vdev_t *rvd; 2129 uberblock_t *ub = &spa->spa_uberblock; 2130 uint64_t children, config_cache_txg = spa->spa_config_txg; 2131 int orig_mode = spa->spa_mode; 2132 int parse; 2133 uint64_t obj; 2134 boolean_t missing_feat_write = B_FALSE; 2135 2136 /* 2137 * If this is an untrusted config, access the pool in read-only mode. 2138 * This prevents things like resilvering recently removed devices. 2139 */ 2140 if (!mosconfig) 2141 spa->spa_mode = FREAD; 2142 2143 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2144 2145 spa->spa_load_state = state; 2146 2147 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2148 return (SET_ERROR(EINVAL)); 2149 2150 parse = (type == SPA_IMPORT_EXISTING ? 
2151 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2152 2153 /* 2154 * Create "The Godfather" zio to hold all async IOs 2155 */ 2156 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 2157 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 2158 2159 /* 2160 * Parse the configuration into a vdev tree. We explicitly set the 2161 * value that will be returned by spa_version() since parsing the 2162 * configuration requires knowing the version number. 2163 */ 2164 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2165 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2166 spa_config_exit(spa, SCL_ALL, FTAG); 2167 2168 if (error != 0) 2169 return (error); 2170 2171 ASSERT(spa->spa_root_vdev == rvd); 2172 2173 if (type != SPA_IMPORT_ASSEMBLE) { 2174 ASSERT(spa_guid(spa) == pool_guid); 2175 } 2176 2177 /* 2178 * Try to open all vdevs, loading each label in the process. 2179 */ 2180 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2181 error = vdev_open(rvd); 2182 spa_config_exit(spa, SCL_ALL, FTAG); 2183 if (error != 0) 2184 return (error); 2185 2186 /* 2187 * We need to validate the vdev labels against the configuration that 2188 * we have in hand, which is dependent on the setting of mosconfig. If 2189 * mosconfig is true then we're validating the vdev labels based on 2190 * that config. Otherwise, we're validating against the cached config 2191 * (zpool.cache) that was read when we loaded the zfs module, and then 2192 * later we will recursively call spa_load() and validate against 2193 * the vdev config. 2194 * 2195 * If we're assembling a new pool that's been split off from an 2196 * existing pool, the labels haven't yet been updated so we skip 2197 * validation for now. 2198 */ 2199 if (type != SPA_IMPORT_ASSEMBLE) { 2200 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2201 error = vdev_validate(rvd, mosconfig); 2202 spa_config_exit(spa, SCL_ALL, FTAG); 2203 2204 if (error != 0) 2205 return (error); 2206 2207 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2208 return (SET_ERROR(ENXIO)); 2209 } 2210 2211 /* 2212 * Find the best uberblock. 2213 */ 2214 vdev_uberblock_load(rvd, ub, &label); 2215 2216 /* 2217 * If we weren't able to find a single valid uberblock, return failure. 2218 */ 2219 if (ub->ub_txg == 0) { 2220 nvlist_free(label); 2221 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2222 } 2223 2224 /* 2225 * If the pool has an unsupported version we can't open it. 2226 */ 2227 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2228 nvlist_free(label); 2229 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2230 } 2231 2232 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2233 nvlist_t *features; 2234 2235 /* 2236 * If we weren't able to find what's necessary for reading the 2237 * MOS in the label, return failure. 2238 */ 2239 if (label == NULL || nvlist_lookup_nvlist(label, 2240 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2241 nvlist_free(label); 2242 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2243 ENXIO)); 2244 } 2245 2246 /* 2247 * Update our in-core representation with the definitive values 2248 * from the label. 2249 */ 2250 nvlist_free(spa->spa_label_features); 2251 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2252 } 2253 2254 nvlist_free(label); 2255 2256 /* 2257 * Look through entries in the label nvlist's features_for_read. If 2258 * there is a feature listed there which we don't understand then we 2259 * cannot open a pool. 
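*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): the
 * label gate above can be summarized as: pools at or past
 * SPA_VERSION_FEATURES must carry a readable
 * ZPOOL_CONFIG_FEATURES_FOR_READ nvlist in the label, because without
 * it the MOS cannot be interpreted.  Hypothetical restatement, compiled
 * out.
 */
#if 0
static int
spa_check_label_features(const uberblock_t *ub, nvlist_t *label,
    nvlist_t **features)
{
	if (ub->ub_version < SPA_VERSION_FEATURES)
		return (0);	/* pre-feature pool: nothing to check */
	if (label == NULL || nvlist_lookup_nvlist(label,
	    ZPOOL_CONFIG_FEATURES_FOR_READ, features) != 0)
		return (SET_ERROR(ENXIO));
	return (0);
}
#endif
/*
 * (End of editor's aside; the original listing resumes.)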
2260 */ 2261 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2262 nvlist_t *unsup_feat; 2263 2264 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2265 0); 2266 2267 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2268 NULL); nvp != NULL; 2269 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2270 if (!zfeature_is_supported(nvpair_name(nvp))) { 2271 VERIFY(nvlist_add_string(unsup_feat, 2272 nvpair_name(nvp), "") == 0); 2273 } 2274 } 2275 2276 if (!nvlist_empty(unsup_feat)) { 2277 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2278 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2279 nvlist_free(unsup_feat); 2280 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2281 ENOTSUP)); 2282 } 2283 2284 nvlist_free(unsup_feat); 2285 } 2286 2287 /* 2288 * If the vdev guid sum doesn't match the uberblock, we have an 2289 * incomplete configuration. We first check to see if the pool 2290 * is aware of the complete config (i.e ZPOOL_CONFIG_VDEV_CHILDREN). 2291 * If it is, defer the vdev_guid_sum check till later so we 2292 * can handle missing vdevs. 2293 */ 2294 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2295 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2296 rvd->vdev_guid_sum != ub->ub_guid_sum) 2297 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2298 2299 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2300 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2301 spa_try_repair(spa, config); 2302 spa_config_exit(spa, SCL_ALL, FTAG); 2303 nvlist_free(spa->spa_config_splitting); 2304 spa->spa_config_splitting = NULL; 2305 } 2306 2307 /* 2308 * Initialize internal SPA structures. 2309 */ 2310 spa->spa_state = POOL_STATE_ACTIVE; 2311 spa->spa_ubsync = spa->spa_uberblock; 2312 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2313 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2314 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
2315 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2316 spa->spa_claim_max_txg = spa->spa_first_txg; 2317 spa->spa_prev_software_version = ub->ub_software_version; 2318 2319 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2320 if (error) 2321 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2322 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2323 2324 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2325 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2326 2327 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2328 boolean_t missing_feat_read = B_FALSE; 2329 nvlist_t *unsup_feat, *enabled_feat; 2330 2331 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2332 &spa->spa_feat_for_read_obj) != 0) { 2333 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2334 } 2335 2336 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2337 &spa->spa_feat_for_write_obj) != 0) { 2338 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2339 } 2340 2341 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2342 &spa->spa_feat_desc_obj) != 0) { 2343 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2344 } 2345 2346 enabled_feat = fnvlist_alloc(); 2347 unsup_feat = fnvlist_alloc(); 2348 2349 if (!feature_is_supported(spa->spa_meta_objset, 2350 spa->spa_feat_for_read_obj, spa->spa_feat_desc_obj, 2351 unsup_feat, enabled_feat)) 2352 missing_feat_read = B_TRUE; 2353 2354 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2355 if (!feature_is_supported(spa->spa_meta_objset, 2356 spa->spa_feat_for_write_obj, spa->spa_feat_desc_obj, 2357 unsup_feat, enabled_feat)) { 2358 missing_feat_write = B_TRUE; 2359 } 2360 } 2361 2362 fnvlist_add_nvlist(spa->spa_load_info, 2363 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2364 2365 if (!nvlist_empty(unsup_feat)) { 2366 fnvlist_add_nvlist(spa->spa_load_info, 2367 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2368 } 2369 2370 fnvlist_free(enabled_feat); 2371 fnvlist_free(unsup_feat); 2372 2373 if (!missing_feat_read) { 2374 fnvlist_add_boolean(spa->spa_load_info, 2375 ZPOOL_CONFIG_CAN_RDONLY); 2376 } 2377 2378 /* 2379 * If the state is SPA_LOAD_TRYIMPORT, our objective is 2380 * twofold: to determine whether the pool is available for 2381 * import in read-write mode and (if it is not) whether the 2382 * pool is available for import in read-only mode. If the pool 2383 * is available for import in read-write mode, it is displayed 2384 * as available in userland; if it is not available for import 2385 * in read-only mode, it is displayed as unavailable in 2386 * userland. If the pool is available for import in read-only 2387 * mode but not read-write mode, it is displayed as unavailable 2388 * in userland with a special note that the pool is actually 2389 * available for open in read-only mode. 2390 * 2391 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2392 * missing a feature for write, we must first determine whether 2393 * the pool can be opened read-only before returning to 2394 * userland in order to know whether to display the 2395 * abovementioned note. 
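*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): the
 * check that follows condenses the two feature scans into one failure
 * predicate.  Hypothetical restatement, compiled out.
 */
#if 0
static boolean_t
spa_missing_feat_fatal(boolean_t missing_feat_read,
    boolean_t missing_feat_write, boolean_t writeable)
{
	/*
	 * Read features are always mandatory; write features matter
	 * only when the pool is opened for write.  A TRYIMPORT thus
	 * survives a missing write feature, which lets it report
	 * read-only availability back to userland.
	 */
	return (missing_feat_read || (missing_feat_write && writeable));
}
#endif
/*
 * (End of editor's aside; the original listing resumes.)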
2396 */ 2397 if (missing_feat_read || (missing_feat_write && 2398 spa_writeable(spa))) { 2399 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2400 ENOTSUP)); 2401 } 2402 } 2403 2404 spa->spa_is_initializing = B_TRUE; 2405 error = dsl_pool_open(spa->spa_dsl_pool); 2406 spa->spa_is_initializing = B_FALSE; 2407 if (error != 0) 2408 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2409 2410 if (!mosconfig) { 2411 uint64_t hostid; 2412 nvlist_t *policy = NULL, *nvconfig; 2413 2414 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2415 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2416 2417 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2418 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2419 char *hostname; 2420 unsigned long myhostid = 0; 2421 2422 VERIFY(nvlist_lookup_string(nvconfig, 2423 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2424 2425#ifdef _KERNEL 2426 myhostid = zone_get_hostid(NULL); 2427#else /* _KERNEL */ 2428 /* 2429 * We're emulating the system's hostid in userland, so 2430 * we can't use zone_get_hostid(). 2431 */ 2432 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2433#endif /* _KERNEL */ 2434 if (check_hostid && hostid != 0 && myhostid != 0 && 2435 hostid != myhostid) { 2436 nvlist_free(nvconfig); 2437 cmn_err(CE_WARN, "pool '%s' could not be " 2438 "loaded as it was last accessed by " 2439 "another system (host: %s hostid: 0x%lx). " 2440 "See: http://illumos.org/msg/ZFS-8000-EY", 2441 spa_name(spa), hostname, 2442 (unsigned long)hostid); 2443 return (SET_ERROR(EBADF)); 2444 } 2445 } 2446 if (nvlist_lookup_nvlist(spa->spa_config, 2447 ZPOOL_REWIND_POLICY, &policy) == 0) 2448 VERIFY(nvlist_add_nvlist(nvconfig, 2449 ZPOOL_REWIND_POLICY, policy) == 0); 2450 2451 spa_config_set(spa, nvconfig); 2452 spa_unload(spa); 2453 spa_deactivate(spa); 2454 spa_activate(spa, orig_mode); 2455 2456 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2457 } 2458 2459 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2460 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2461 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2462 if (error != 0) 2463 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2464 2465 /* 2466 * Load the bit that tells us to use the new accounting function 2467 * (raid-z deflation). If we have an older pool, this will not 2468 * be present. 2469 */ 2470 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2471 if (error != 0 && error != ENOENT) 2472 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2473 2474 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2475 &spa->spa_creation_version); 2476 if (error != 0 && error != ENOENT) 2477 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2478 2479 /* 2480 * Load the persistent error log. If we have an older pool, this will 2481 * not be present. 2482 */ 2483 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2484 if (error != 0 && error != ENOENT) 2485 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2486 2487 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2488 &spa->spa_errlog_scrub); 2489 if (error != 0 && error != ENOENT) 2490 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2491 2492 /* 2493 * Load the history object. If we have an older pool, this 2494 * will not be present. 
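*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): the
 * spa_dir_prop() calls above and below all follow one convention:
 * ENOENT means "older pool, object simply not present" and is
 * tolerated, while any other error is fatal.  Hypothetical wrapper,
 * compiled out.
 */
#if 0
static int
spa_dir_prop_optional(spa_t *spa, const char *name, uint64_t *val)
{
	int error = spa_dir_prop(spa, name, val);

	/* A missing entry is fine; a real lookup failure is not. */
	return (error == ENOENT ? 0 : error);
}
#endif
/*
 * (End of editor's aside; the original listing resumes.)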
2495 */ 2496 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2497 if (error != 0 && error != ENOENT) 2498 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2499 2500 /* 2501 * If we're assembling the pool from the split-off vdevs of 2502 * an existing pool, we don't want to attach the spares & cache 2503 * devices. 2504 */ 2505 2506 /* 2507 * Load any hot spares for this pool. 2508 */ 2509 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2510 if (error != 0 && error != ENOENT) 2511 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2512 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2513 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2514 if (load_nvlist(spa, spa->spa_spares.sav_object, 2515 &spa->spa_spares.sav_config) != 0) 2516 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2517 2518 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2519 spa_load_spares(spa); 2520 spa_config_exit(spa, SCL_ALL, FTAG); 2521 } else if (error == 0) { 2522 spa->spa_spares.sav_sync = B_TRUE; 2523 } 2524 2525 /* 2526 * Load any level 2 ARC devices for this pool. 2527 */ 2528 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2529 &spa->spa_l2cache.sav_object); 2530 if (error != 0 && error != ENOENT) 2531 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2532 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2533 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2534 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2535 &spa->spa_l2cache.sav_config) != 0) 2536 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2537 2538 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2539 spa_load_l2cache(spa); 2540 spa_config_exit(spa, SCL_ALL, FTAG); 2541 } else if (error == 0) { 2542 spa->spa_l2cache.sav_sync = B_TRUE; 2543 } 2544 2545 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2546 2547 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2548 if (error && error != ENOENT) 2549 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2550 2551 if (error == 0) { 2552 uint64_t autoreplace; 2553 2554 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2555 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2556 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2557 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2558 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2559 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2560 &spa->spa_dedup_ditto); 2561 2562 spa->spa_autoreplace = (autoreplace != 0); 2563 } 2564 2565 /* 2566 * If the 'autoreplace' property is set, then post a resource notifying 2567 * the ZFS DE that it should not issue any faults for unopenable 2568 * devices. We also iterate over the vdevs, and post a sysevent for any 2569 * unopenable vdevs so that the normal autoreplace handler can take 2570 * over. 2571 */ 2572 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2573 spa_check_removed(spa->spa_root_vdev); 2574 /* 2575 * For the import case, this is done in spa_import(), because 2576 * at this point we're using the spare definitions from 2577 * the MOS config, not necessarily from the userland config. 2578 */ 2579 if (state != SPA_LOAD_IMPORT) { 2580 spa_aux_check_removed(&spa->spa_spares); 2581 spa_aux_check_removed(&spa->spa_l2cache); 2582 } 2583 } 2584 2585 /* 2586 * Load the vdev state for all toplevel vdevs. 2587 */ 2588 vdev_load(rvd); 2589 2590 /* 2591 * Propagate the leaf DTLs we just loaded all the way up the tree. 
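*/

/*
 * Editor's aside (illustrative, not part of the original spa.c):
 * spa_prop_find() discards the zap_lookup() return value, so *val is
 * written only when the property actually exists in the MOS.  That is
 * why spa_delegation is seeded with its default before the lookups in
 * the property block above.  Compiled-out sketch of the pattern:
 */
#if 0
uint64_t delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);

/* Overwrites 'delegation' only if the pool props object has it. */
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &delegation);
#endif
/*
 * (End of editor's aside; the original listing resumes.)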
2592 */ 2593 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2594 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2595 spa_config_exit(spa, SCL_ALL, FTAG); 2596 2597 /* 2598 * Load the DDTs (dedup tables). 2599 */ 2600 error = ddt_load(spa); 2601 if (error != 0) 2602 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2603 2604 spa_update_dspace(spa); 2605 2606 /* 2607 * Validate the config, using the MOS config to fill in any 2608 * information which might be missing. If we fail to validate 2609 * the config then declare the pool unfit for use. If we're 2610 * assembling a pool from a split, the log is not transferred 2611 * over. 2612 */ 2613 if (type != SPA_IMPORT_ASSEMBLE) { 2614 nvlist_t *nvconfig; 2615 2616 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2617 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2618 2619 if (!spa_config_valid(spa, nvconfig)) { 2620 nvlist_free(nvconfig); 2621 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2622 ENXIO)); 2623 } 2624 nvlist_free(nvconfig); 2625 2626 /* 2627 * Now that we've validated the config, check the state of the 2628 * root vdev. If it can't be opened, it indicates one or 2629 * more toplevel vdevs are faulted. 2630 */ 2631 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2632 return (SET_ERROR(ENXIO)); 2633 2634 if (spa_check_logs(spa)) { 2635 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2636 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2637 } 2638 } 2639 2640 if (missing_feat_write) { 2641 ASSERT(state == SPA_LOAD_TRYIMPORT); 2642 2643 /* 2644 * At this point, we know that we can open the pool in 2645 * read-only mode but not read-write mode. We now have enough 2646 * information and can return to userland. 2647 */ 2648 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2649 } 2650 2651 /* 2652 * We've successfully opened the pool, verify that we're ready 2653 * to start pushing transactions. 2654 */ 2655 if (state != SPA_LOAD_TRYIMPORT) { 2656 if (error = spa_load_verify(spa)) 2657 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2658 error)); 2659 } 2660 2661 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER || 2662 spa->spa_load_max_txg == UINT64_MAX)) { 2663 dmu_tx_t *tx; 2664 int need_update = B_FALSE; 2665 2666 ASSERT(state != SPA_LOAD_TRYIMPORT); 2667 2668 /* 2669 * Claim log blocks that haven't been committed yet. 2670 * This must all happen in a single txg. 2671 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 2672 * invoked from zil_claim_log_block()'s i/o done callback. 2673 * Price of rollback is that we abandon the log. 2674 */ 2675 spa->spa_claiming = B_TRUE; 2676 2677 tx = dmu_tx_create_assigned(spa_get_dsl(spa), 2678 spa_first_txg(spa)); 2679 (void) dmu_objset_find(spa_name(spa), 2680 zil_claim, tx, DS_FIND_CHILDREN); 2681 dmu_tx_commit(tx); 2682 2683 spa->spa_claiming = B_FALSE; 2684 2685 spa_set_log_state(spa, SPA_LOG_GOOD); 2686 spa->spa_sync_on = B_TRUE; 2687 txg_sync_start(spa->spa_dsl_pool); 2688 2689 /* 2690 * Wait for all claims to sync. We sync up to the highest 2691 * claimed log block birth time so that claimed log blocks 2692 * don't appear to be from the future. spa_claim_max_txg 2693 * will have been set for us by either zil_check_log_chain() 2694 * (invoked from spa_check_logs()) or zil_claim() above. 2695 */ 2696 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 2697 2698 /* 2699 * If the config cache is stale, or we have uninitialized 2700 * metaslabs (see spa_vdev_add()), then update the config. 
2701 * 2702 * If this is a verbatim import, trust the current 2703 * in-core spa_config and update the disk labels. 2704 */ 2705 if (config_cache_txg != spa->spa_config_txg || 2706 state == SPA_LOAD_IMPORT || 2707 state == SPA_LOAD_RECOVER || 2708 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 2709 need_update = B_TRUE; 2710 2711 for (int c = 0; c < rvd->vdev_children; c++) 2712 if (rvd->vdev_child[c]->vdev_ms_array == 0) 2713 need_update = B_TRUE; 2714 2715 /* 2716 * Update the config cache asychronously in case we're the 2717 * root pool, in which case the config cache isn't writable yet. 2718 */ 2719 if (need_update) 2720 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2721 2722 /* 2723 * Check all DTLs to see if anything needs resilvering. 2724 */ 2725 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 2726 vdev_resilver_needed(rvd, NULL, NULL)) 2727 spa_async_request(spa, SPA_ASYNC_RESILVER); 2728 2729 /* 2730 * Log the fact that we booted up (so that we can detect if 2731 * we rebooted in the middle of an operation). 2732 */ 2733 spa_history_log_version(spa, "open"); 2734 2735 /* 2736 * Delete any inconsistent datasets. 2737 */ 2738 (void) dmu_objset_find(spa_name(spa), 2739 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 2740 2741 /* 2742 * Clean up any stale temporary dataset userrefs. 2743 */ 2744 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 2745 } 2746 2747 return (0); 2748} 2749 2750static int 2751spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 2752{ 2753 int mode = spa->spa_mode; 2754 2755 spa_unload(spa); 2756 spa_deactivate(spa); 2757 2758 spa->spa_load_max_txg--; 2759 2760 spa_activate(spa, mode); 2761 spa_async_suspend(spa); 2762 2763 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 2764} 2765 2766/* 2767 * If spa_load() fails this function will try loading prior txg's. If 2768 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 2769 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 2770 * function will not rewind the pool and will return the same error as 2771 * spa_load(). 2772 */ 2773static int 2774spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 2775 uint64_t max_request, int rewind_flags) 2776{ 2777 nvlist_t *loadinfo = NULL; 2778 nvlist_t *config = NULL; 2779 int load_error, rewind_error; 2780 uint64_t safe_rewind_txg; 2781 uint64_t min_txg; 2782 2783 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 2784 spa->spa_load_max_txg = spa->spa_load_txg; 2785 spa_set_log_state(spa, SPA_LOG_CLEAR); 2786 } else { 2787 spa->spa_load_max_txg = max_request; 2788 } 2789 2790 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 2791 mosconfig); 2792 if (load_error == 0) 2793 return (0); 2794 2795 if (spa->spa_root_vdev != NULL) 2796 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2797 2798 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 2799 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 2800 2801 if (rewind_flags & ZPOOL_NEVER_REWIND) { 2802 nvlist_free(config); 2803 return (load_error); 2804 } 2805 2806 if (state == SPA_LOAD_RECOVER) { 2807 /* Price of rolling back is discarding txgs, including log */ 2808 spa_set_log_state(spa, SPA_LOG_CLEAR); 2809 } else { 2810 /* 2811 * If we aren't rolling back save the load info from our first 2812 * import attempt so that we can restore it after attempting 2813 * to rewind. 
2814 */ 2815 loadinfo = spa->spa_load_info; 2816 spa->spa_load_info = fnvlist_alloc(); 2817 } 2818 2819 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 2820 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 2821 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 2822 TXG_INITIAL : safe_rewind_txg; 2823 2824 /* 2825 * Continue as long as we're finding errors, we're still within 2826 * the acceptable rewind range, and we're still finding uberblocks 2827 */ 2828 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 2829 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 2830 if (spa->spa_load_max_txg < safe_rewind_txg) 2831 spa->spa_extreme_rewind = B_TRUE; 2832 rewind_error = spa_load_retry(spa, state, mosconfig); 2833 } 2834 2835 spa->spa_extreme_rewind = B_FALSE; 2836 spa->spa_load_max_txg = UINT64_MAX; 2837 2838 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 2839 spa_config_set(spa, config); 2840 2841 if (state == SPA_LOAD_RECOVER) { 2842 ASSERT3P(loadinfo, ==, NULL); 2843 return (rewind_error); 2844 } else { 2845 /* Store the rewind info as part of the initial load info */ 2846 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 2847 spa->spa_load_info); 2848 2849 /* Restore the initial load info */ 2850 fnvlist_free(spa->spa_load_info); 2851 spa->spa_load_info = loadinfo; 2852 2853 return (load_error); 2854 } 2855} 2856 2857/* 2858 * Pool Open/Import 2859 * 2860 * The import case is identical to an open except that the configuration is sent 2861 * down from userland, instead of grabbed from the configuration cache. For the 2862 * case of an open, the pool configuration will exist in the 2863 * POOL_STATE_UNINITIALIZED state. 2864 * 2865 * The stats information (gen/count/ustats) is used to gather vdev statistics at 2866 * the same time open the pool, without having to keep around the spa_t in some 2867 * ambiguous state. 2868 */ 2869static int 2870spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 2871 nvlist_t **config) 2872{ 2873 spa_t *spa; 2874 spa_load_state_t state = SPA_LOAD_OPEN; 2875 int error; 2876 int locked = B_FALSE; 2877 int firstopen = B_FALSE; 2878 2879 *spapp = NULL; 2880 2881 /* 2882 * As disgusting as this is, we need to support recursive calls to this 2883 * function because dsl_dir_open() is called during spa_load(), and ends 2884 * up calling spa_open() again. The real fix is to figure out how to 2885 * avoid dsl_dir_open() calling this in the first place. 2886 */ 2887 if (mutex_owner(&spa_namespace_lock) != curthread) { 2888 mutex_enter(&spa_namespace_lock); 2889 locked = B_TRUE; 2890 } 2891 2892 if ((spa = spa_lookup(pool)) == NULL) { 2893 if (locked) 2894 mutex_exit(&spa_namespace_lock); 2895 return (SET_ERROR(ENOENT)); 2896 } 2897 2898 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 2899 zpool_rewind_policy_t policy; 2900 2901 firstopen = B_TRUE; 2902 2903 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config, 2904 &policy); 2905 if (policy.zrp_request & ZPOOL_DO_REWIND) 2906 state = SPA_LOAD_RECOVER; 2907 2908 spa_activate(spa, spa_mode_global); 2909 2910 if (state != SPA_LOAD_RECOVER) 2911 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 2912 2913 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 2914 policy.zrp_request); 2915 2916 if (error == EBADF) { 2917 /* 2918 * If vdev_validate() returns failure (indicated by 2919 * EBADF), it indicates that one of the vdevs indicates 2920 * that the pool has been exported or destroyed. 
If 2921 * this is the case, the config cache is out of sync and 2922 * we should remove the pool from the namespace. 2923 */ 2924 spa_unload(spa); 2925 spa_deactivate(spa); 2926 spa_config_sync(spa, B_TRUE, B_TRUE); 2927 spa_remove(spa); 2928 if (locked) 2929 mutex_exit(&spa_namespace_lock); 2930 return (SET_ERROR(ENOENT)); 2931 } 2932 2933 if (error) { 2934 /* 2935 * We can't open the pool, but we still have useful 2936 * information: the state of each vdev after the 2937 * attempted vdev_open(). Return this to the user. 2938 */ 2939 if (config != NULL && spa->spa_config) { 2940 VERIFY(nvlist_dup(spa->spa_config, config, 2941 KM_SLEEP) == 0); 2942 VERIFY(nvlist_add_nvlist(*config, 2943 ZPOOL_CONFIG_LOAD_INFO, 2944 spa->spa_load_info) == 0); 2945 } 2946 spa_unload(spa); 2947 spa_deactivate(spa); 2948 spa->spa_last_open_failed = error; 2949 if (locked) 2950 mutex_exit(&spa_namespace_lock); 2951 *spapp = NULL; 2952 return (error); 2953 } 2954 } 2955 2956 spa_open_ref(spa, tag); 2957 2958 if (config != NULL) 2959 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2960 2961 /* 2962 * If we've recovered the pool, pass back any information we 2963 * gathered while doing the load. 2964 */ 2965 if (state == SPA_LOAD_RECOVER) { 2966 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 2967 spa->spa_load_info) == 0); 2968 } 2969 2970 if (locked) { 2971 spa->spa_last_open_failed = 0; 2972 spa->spa_last_ubsync_txg = 0; 2973 spa->spa_load_txg = 0; 2974 mutex_exit(&spa_namespace_lock); 2975#ifdef __FreeBSD__ 2976#ifdef _KERNEL 2977 if (firstopen) 2978 zvol_create_minors(spa->spa_name); 2979#endif 2980#endif 2981 } 2982 2983 *spapp = spa; 2984 2985 return (0); 2986} 2987 2988int 2989spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 2990 nvlist_t **config) 2991{ 2992 return (spa_open_common(name, spapp, tag, policy, config)); 2993} 2994 2995int 2996spa_open(const char *name, spa_t **spapp, void *tag) 2997{ 2998 return (spa_open_common(name, spapp, tag, NULL, NULL)); 2999} 3000 3001/* 3002 * Lookup the given spa_t, incrementing the inject count in the process, 3003 * preventing it from being exported or destroyed. 3004 */ 3005spa_t * 3006spa_inject_addref(char *name) 3007{ 3008 spa_t *spa; 3009 3010 mutex_enter(&spa_namespace_lock); 3011 if ((spa = spa_lookup(name)) == NULL) { 3012 mutex_exit(&spa_namespace_lock); 3013 return (NULL); 3014 } 3015 spa->spa_inject_ref++; 3016 mutex_exit(&spa_namespace_lock); 3017 3018 return (spa); 3019} 3020 3021void 3022spa_inject_delref(spa_t *spa) 3023{ 3024 mutex_enter(&spa_namespace_lock); 3025 spa->spa_inject_ref--; 3026 mutex_exit(&spa_namespace_lock); 3027} 3028 3029/* 3030 * Add spares device information to the nvlist. 
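*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): a
 * typical consumer of the interfaces above pairs spa_open() with
 * spa_close() under the same tag.  Hypothetical caller ("tank" is a
 * made-up pool name), compiled out.
 */
#if 0
static int
example_with_pool(void)
{
	spa_t *spa;
	int error;

	if ((error = spa_open("tank", &spa, FTAG)) == 0) {
		/* ... operate on the pool while the reference is held ... */
		spa_close(spa, FTAG);
	}
	return (error);
}
#endif
/*
 * (End of editor's aside; the original listing resumes.)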
3031 */ 3032static void 3033spa_add_spares(spa_t *spa, nvlist_t *config) 3034{ 3035 nvlist_t **spares; 3036 uint_t i, nspares; 3037 nvlist_t *nvroot; 3038 uint64_t guid; 3039 vdev_stat_t *vs; 3040 uint_t vsc; 3041 uint64_t pool; 3042 3043 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3044 3045 if (spa->spa_spares.sav_count == 0) 3046 return; 3047 3048 VERIFY(nvlist_lookup_nvlist(config, 3049 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3050 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3051 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3052 if (nspares != 0) { 3053 VERIFY(nvlist_add_nvlist_array(nvroot, 3054 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3055 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3056 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3057 3058 /* 3059 * Go through and find any spares which have since been 3060 * repurposed as an active spare. If this is the case, update 3061 * their status appropriately. 3062 */ 3063 for (i = 0; i < nspares; i++) { 3064 VERIFY(nvlist_lookup_uint64(spares[i], 3065 ZPOOL_CONFIG_GUID, &guid) == 0); 3066 if (spa_spare_exists(guid, &pool, NULL) && 3067 pool != 0ULL) { 3068 VERIFY(nvlist_lookup_uint64_array( 3069 spares[i], ZPOOL_CONFIG_VDEV_STATS, 3070 (uint64_t **)&vs, &vsc) == 0); 3071 vs->vs_state = VDEV_STATE_CANT_OPEN; 3072 vs->vs_aux = VDEV_AUX_SPARED; 3073 } 3074 } 3075 } 3076} 3077 3078/* 3079 * Add l2cache device information to the nvlist, including vdev stats. 3080 */ 3081static void 3082spa_add_l2cache(spa_t *spa, nvlist_t *config) 3083{ 3084 nvlist_t **l2cache; 3085 uint_t i, j, nl2cache; 3086 nvlist_t *nvroot; 3087 uint64_t guid; 3088 vdev_t *vd; 3089 vdev_stat_t *vs; 3090 uint_t vsc; 3091 3092 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3093 3094 if (spa->spa_l2cache.sav_count == 0) 3095 return; 3096 3097 VERIFY(nvlist_lookup_nvlist(config, 3098 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3099 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3100 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3101 if (nl2cache != 0) { 3102 VERIFY(nvlist_add_nvlist_array(nvroot, 3103 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3104 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3105 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3106 3107 /* 3108 * Update level 2 cache device stats. 3109 */ 3110 3111 for (i = 0; i < nl2cache; i++) { 3112 VERIFY(nvlist_lookup_uint64(l2cache[i], 3113 ZPOOL_CONFIG_GUID, &guid) == 0); 3114 3115 vd = NULL; 3116 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3117 if (guid == 3118 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3119 vd = spa->spa_l2cache.sav_vdevs[j]; 3120 break; 3121 } 3122 } 3123 ASSERT(vd != NULL); 3124 3125 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3126 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3127 == 0); 3128 vdev_get_stats(vd, vs); 3129 } 3130 } 3131} 3132 3133static void 3134spa_add_feature_stats(spa_t *spa, nvlist_t *config) 3135{ 3136 nvlist_t *features; 3137 zap_cursor_t zc; 3138 zap_attribute_t za; 3139 3140 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3141 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3142 3143 /* We may be unable to read features if pool is suspended. 
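*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): the
 * feature-stat loops below use the standard ZAP iteration idiom, shown
 * here in isolation as a hypothetical, compiled-out helper.
 */
#if 0
static void
zap_visit_uint64s(objset_t *os, uint64_t obj)
{
	zap_cursor_t zc;
	zap_attribute_t za;

	for (zap_cursor_init(&zc, os, obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		/* each entry carries za.za_name / za.za_first_integer */
	}
	zap_cursor_fini(&zc);
}
#endif
/*
 * (End of editor's aside; the original listing resumes.)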
*/ 3144 if (spa_suspended(spa)) 3145 goto out; 3146 3147 if (spa->spa_feat_for_read_obj != 0) { 3148 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3149 spa->spa_feat_for_read_obj); 3150 zap_cursor_retrieve(&zc, &za) == 0; 3151 zap_cursor_advance(&zc)) { 3152 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3153 za.za_num_integers == 1); 3154 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3155 za.za_first_integer)); 3156 } 3157 zap_cursor_fini(&zc); 3158 } 3159 3160 if (spa->spa_feat_for_write_obj != 0) { 3161 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3162 spa->spa_feat_for_write_obj); 3163 zap_cursor_retrieve(&zc, &za) == 0; 3164 zap_cursor_advance(&zc)) { 3165 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3166 za.za_num_integers == 1); 3167 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3168 za.za_first_integer)); 3169 } 3170 zap_cursor_fini(&zc); 3171 } 3172 3173out: 3174 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3175 features) == 0); 3176 nvlist_free(features); 3177} 3178 3179int 3180spa_get_stats(const char *name, nvlist_t **config, 3181 char *altroot, size_t buflen) 3182{ 3183 int error; 3184 spa_t *spa; 3185 3186 *config = NULL; 3187 error = spa_open_common(name, &spa, FTAG, NULL, config); 3188 3189 if (spa != NULL) { 3190 /* 3191 * This still leaves a window of inconsistency where the spares 3192 * or l2cache devices could change and the config would be 3193 * self-inconsistent. 3194 */ 3195 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3196 3197 if (*config != NULL) { 3198 uint64_t loadtimes[2]; 3199 3200 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3201 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3202 VERIFY(nvlist_add_uint64_array(*config, 3203 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3204 3205 VERIFY(nvlist_add_uint64(*config, 3206 ZPOOL_CONFIG_ERRCOUNT, 3207 spa_get_errlog_size(spa)) == 0); 3208 3209 if (spa_suspended(spa)) 3210 VERIFY(nvlist_add_uint64(*config, 3211 ZPOOL_CONFIG_SUSPENDED, 3212 spa->spa_failmode) == 0); 3213 3214 spa_add_spares(spa, *config); 3215 spa_add_l2cache(spa, *config); 3216 spa_add_feature_stats(spa, *config); 3217 } 3218 } 3219 3220 /* 3221 * We want to get the alternate root even for faulted pools, so we cheat 3222 * and call spa_lookup() directly. 3223 */ 3224 if (altroot) { 3225 if (spa == NULL) { 3226 mutex_enter(&spa_namespace_lock); 3227 spa = spa_lookup(name); 3228 if (spa) 3229 spa_altroot(spa, altroot, buflen); 3230 else 3231 altroot[0] = '\0'; 3232 spa = NULL; 3233 mutex_exit(&spa_namespace_lock); 3234 } else { 3235 spa_altroot(spa, altroot, buflen); 3236 } 3237 } 3238 3239 if (spa != NULL) { 3240 spa_config_exit(spa, SCL_CONFIG, FTAG); 3241 spa_close(spa, FTAG); 3242 } 3243 3244 return (error); 3245} 3246 3247/* 3248 * Validate that the auxiliary device array is well formed. We must have an 3249 * array of nvlists, each of which describes a valid leaf vdev. If this is an 3250 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3251 * specified, as long as they are well-formed. 3252 */ 3253static int 3254spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3255 spa_aux_vdev_t *sav, const char *config, uint64_t version, 3256 vdev_labeltype_t label) 3257{ 3258 nvlist_t **dev; 3259 uint_t i, ndev; 3260 vdev_t *vd; 3261 int error; 3262 3263 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3264 3265 /* 3266 * It's acceptable to have no devs specified. 
3267 */ 3268 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3269 return (0); 3270 3271 if (ndev == 0) 3272 return (SET_ERROR(EINVAL)); 3273 3274 /* 3275 * Make sure the pool is formatted with a version that supports this 3276 * device type. 3277 */ 3278 if (spa_version(spa) < version) 3279 return (SET_ERROR(ENOTSUP)); 3280 3281 /* 3282 * Set the pending device list so we correctly handle device in-use 3283 * checking. 3284 */ 3285 sav->sav_pending = dev; 3286 sav->sav_npending = ndev; 3287 3288 for (i = 0; i < ndev; i++) { 3289 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3290 mode)) != 0) 3291 goto out; 3292 3293 if (!vd->vdev_ops->vdev_op_leaf) { 3294 vdev_free(vd); 3295 error = SET_ERROR(EINVAL); 3296 goto out; 3297 } 3298 3299 /* 3300 * The L2ARC currently only supports disk devices in 3301 * kernel context. For user-level testing, we allow it. 3302 */ 3303#ifdef _KERNEL 3304 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3305 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3306 error = SET_ERROR(ENOTBLK); 3307 vdev_free(vd); 3308 goto out; 3309 } 3310#endif 3311 vd->vdev_top = vd; 3312 3313 if ((error = vdev_open(vd)) == 0 && 3314 (error = vdev_label_init(vd, crtxg, label)) == 0) { 3315 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3316 vd->vdev_guid) == 0); 3317 } 3318 3319 vdev_free(vd); 3320 3321 if (error && 3322 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3323 goto out; 3324 else 3325 error = 0; 3326 } 3327 3328out: 3329 sav->sav_pending = NULL; 3330 sav->sav_npending = 0; 3331 return (error); 3332} 3333 3334static int 3335spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3336{ 3337 int error; 3338 3339 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3340 3341 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3342 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3343 VDEV_LABEL_SPARE)) != 0) { 3344 return (error); 3345 } 3346 3347 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3348 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3349 VDEV_LABEL_L2CACHE)); 3350} 3351 3352static void 3353spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3354 const char *config) 3355{ 3356 int i; 3357 3358 if (sav->sav_config != NULL) { 3359 nvlist_t **olddevs; 3360 uint_t oldndevs; 3361 nvlist_t **newdevs; 3362 3363 /* 3364 * Generate new dev list by concatenating with the 3365 * current dev list. 3366 */ 3367 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3368 &olddevs, &oldndevs) == 0); 3369 3370 newdevs = kmem_alloc(sizeof (void *) * 3371 (ndevs + oldndevs), KM_SLEEP); 3372 for (i = 0; i < oldndevs; i++) 3373 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3374 KM_SLEEP) == 0); 3375 for (i = 0; i < ndevs; i++) 3376 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3377 KM_SLEEP) == 0); 3378 3379 VERIFY(nvlist_remove(sav->sav_config, config, 3380 DATA_TYPE_NVLIST_ARRAY) == 0); 3381 3382 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3383 config, newdevs, ndevs + oldndevs) == 0); 3384 for (i = 0; i < oldndevs + ndevs; i++) 3385 nvlist_free(newdevs[i]); 3386 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3387 } else { 3388 /* 3389 * Generate a new dev list. 
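*/

/*
 * Editor's aside (illustrative, not part of the original spa.c):
 * nvlist_add_nvlist_array() stores its own copy of the array, which is
 * why spa_set_aux_vdevs() above can free every newdevs[] element right
 * after the add.  Compiled-out excerpt of that ownership pattern,
 * reusing the locals above:
 */
#if 0
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
    config, newdevs, ndevs + oldndevs) == 0);	/* sav_config owns a copy */
for (i = 0; i < oldndevs + ndevs; i++)
	nvlist_free(newdevs[i]);		/* safe: copies were taken */
#endif
/*
 * (End of editor's aside; the original listing resumes.)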
3390 */ 3391 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3392 KM_SLEEP) == 0); 3393 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3394 devs, ndevs) == 0); 3395 } 3396} 3397 3398/* 3399 * Stop and drop level 2 ARC devices 3400 */ 3401void 3402spa_l2cache_drop(spa_t *spa) 3403{ 3404 vdev_t *vd; 3405 int i; 3406 spa_aux_vdev_t *sav = &spa->spa_l2cache; 3407 3408 for (i = 0; i < sav->sav_count; i++) { 3409 uint64_t pool; 3410 3411 vd = sav->sav_vdevs[i]; 3412 ASSERT(vd != NULL); 3413 3414 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3415 pool != 0ULL && l2arc_vdev_present(vd)) 3416 l2arc_remove_vdev(vd); 3417 } 3418} 3419 3420/* 3421 * Pool Creation 3422 */ 3423int 3424spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3425 nvlist_t *zplprops) 3426{ 3427 spa_t *spa; 3428 char *altroot = NULL; 3429 vdev_t *rvd; 3430 dsl_pool_t *dp; 3431 dmu_tx_t *tx; 3432 int error = 0; 3433 uint64_t txg = TXG_INITIAL; 3434 nvlist_t **spares, **l2cache; 3435 uint_t nspares, nl2cache; 3436 uint64_t version, obj; 3437 boolean_t has_features; 3438 3439 /* 3440 * If this pool already exists, return failure. 3441 */ 3442 mutex_enter(&spa_namespace_lock); 3443 if (spa_lookup(pool) != NULL) { 3444 mutex_exit(&spa_namespace_lock); 3445 return (SET_ERROR(EEXIST)); 3446 } 3447 3448 /* 3449 * Allocate a new spa_t structure. 3450 */ 3451 (void) nvlist_lookup_string(props, 3452 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3453 spa = spa_add(pool, NULL, altroot); 3454 spa_activate(spa, spa_mode_global); 3455 3456 if (props && (error = spa_prop_validate(spa, props))) { 3457 spa_deactivate(spa); 3458 spa_remove(spa); 3459 mutex_exit(&spa_namespace_lock); 3460 return (error); 3461 } 3462 3463 has_features = B_FALSE; 3464 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3465 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3466 if (zpool_prop_feature(nvpair_name(elem))) 3467 has_features = B_TRUE; 3468 } 3469 3470 if (has_features || nvlist_lookup_uint64(props, 3471 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3472 version = SPA_VERSION; 3473 } 3474 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3475 3476 spa->spa_first_txg = txg; 3477 spa->spa_uberblock.ub_txg = txg - 1; 3478 spa->spa_uberblock.ub_version = version; 3479 spa->spa_ubsync = spa->spa_uberblock; 3480 3481 /* 3482 * Create "The Godfather" zio to hold all async IOs 3483 */ 3484 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 3485 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 3486 3487 /* 3488 * Create the root vdev. 3489 */ 3490 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3491 3492 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3493 3494 ASSERT(error != 0 || rvd != NULL); 3495 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3496 3497 if (error == 0 && !zfs_allocatable_devs(nvroot)) 3498 error = SET_ERROR(EINVAL); 3499 3500 if (error == 0 && 3501 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3502 (error = spa_validate_aux(spa, nvroot, txg, 3503 VDEV_ALLOC_ADD)) == 0) { 3504 for (int c = 0; c < rvd->vdev_children; c++) { 3505 vdev_ashift_optimize(rvd->vdev_child[c]); 3506 vdev_metaslab_set_size(rvd->vdev_child[c]); 3507 vdev_expand(rvd->vdev_child[c], txg); 3508 } 3509 } 3510 3511 spa_config_exit(spa, SCL_ALL, FTAG); 3512 3513 if (error != 0) { 3514 spa_unload(spa); 3515 spa_deactivate(spa); 3516 spa_remove(spa); 3517 mutex_exit(&spa_namespace_lock); 3518 return (error); 3519 } 3520 3521 /* 3522 * Get the list of spares, if specified. 
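*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): a
 * minimal nvroot of the shape spa_create() above expects, built with
 * the fnvlist helpers used elsewhere in this file.  The leaf path
 * "/dev/da0" is a made-up example; compiled out.
 */
#if 0
nvlist_t *leaf, *nvroot;

leaf = fnvlist_alloc();
fnvlist_add_string(leaf, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK);
fnvlist_add_string(leaf, ZPOOL_CONFIG_PATH, "/dev/da0");

nvroot = fnvlist_alloc();
fnvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT);
fnvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &leaf, 1);
#endif
/*
 * (End of editor's aside; the original listing resumes.)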
3523 */ 3524 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3525 &spares, &nspares) == 0) { 3526 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3527 KM_SLEEP) == 0); 3528 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3529 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3530 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3531 spa_load_spares(spa); 3532 spa_config_exit(spa, SCL_ALL, FTAG); 3533 spa->spa_spares.sav_sync = B_TRUE; 3534 } 3535 3536 /* 3537 * Get the list of level 2 cache devices, if specified. 3538 */ 3539 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3540 &l2cache, &nl2cache) == 0) { 3541 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3542 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3543 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3544 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3545 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3546 spa_load_l2cache(spa); 3547 spa_config_exit(spa, SCL_ALL, FTAG); 3548 spa->spa_l2cache.sav_sync = B_TRUE; 3549 } 3550 3551 spa->spa_is_initializing = B_TRUE; 3552 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3553 spa->spa_meta_objset = dp->dp_meta_objset; 3554 spa->spa_is_initializing = B_FALSE; 3555 3556 /* 3557 * Create DDTs (dedup tables). 3558 */ 3559 ddt_create(spa); 3560 3561 spa_update_dspace(spa); 3562 3563 tx = dmu_tx_create_assigned(dp, txg); 3564 3565 /* 3566 * Create the pool config object. 3567 */ 3568 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3569 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3570 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3571 3572 if (zap_add(spa->spa_meta_objset, 3573 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3574 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3575 cmn_err(CE_PANIC, "failed to add pool config"); 3576 } 3577 3578 if (spa_version(spa) >= SPA_VERSION_FEATURES) 3579 spa_feature_create_zap_objects(spa, tx); 3580 3581 if (zap_add(spa->spa_meta_objset, 3582 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3583 sizeof (uint64_t), 1, &version, tx) != 0) { 3584 cmn_err(CE_PANIC, "failed to add pool version"); 3585 } 3586 3587 /* Newly created pools with the right version are always deflated. */ 3588 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3589 spa->spa_deflate = TRUE; 3590 if (zap_add(spa->spa_meta_objset, 3591 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3592 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3593 cmn_err(CE_PANIC, "failed to add deflate"); 3594 } 3595 } 3596 3597 /* 3598 * Create the deferred-free bpobj. Turn off compression 3599 * because sync-to-convergence takes longer if the blocksize 3600 * keeps changing. 3601 */ 3602 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3603 dmu_object_set_compress(spa->spa_meta_objset, obj, 3604 ZIO_COMPRESS_OFF, tx); 3605 if (zap_add(spa->spa_meta_objset, 3606 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3607 sizeof (uint64_t), 1, &obj, tx) != 0) { 3608 cmn_err(CE_PANIC, "failed to add bpobj"); 3609 } 3610 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3611 spa->spa_meta_objset, obj)); 3612 3613 /* 3614 * Create the pool's history object. 3615 */ 3616 if (version >= SPA_VERSION_ZPOOL_HISTORY) 3617 spa_history_create_obj(spa, tx); 3618 3619 /* 3620 * Set pool properties. 
3621 */ 3622 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3623 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3624 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3625 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3626 3627 if (props != NULL) { 3628 spa_configfile_set(spa, props, B_FALSE); 3629 spa_sync_props(props, tx); 3630 } 3631 3632 dmu_tx_commit(tx); 3633 3634 spa->spa_sync_on = B_TRUE; 3635 txg_sync_start(spa->spa_dsl_pool); 3636 3637 /* 3638 * We explicitly wait for the first transaction to complete so that our 3639 * bean counters are appropriately updated. 3640 */ 3641 txg_wait_synced(spa->spa_dsl_pool, txg); 3642 3643 spa_config_sync(spa, B_FALSE, B_TRUE); 3644 3645 spa_history_log_version(spa, "create"); 3646 3647 spa->spa_minref = refcount_count(&spa->spa_refcount); 3648 3649 mutex_exit(&spa_namespace_lock); 3650 3651 return (0); 3652} 3653 3654#ifdef _KERNEL 3655#if defined(sun) 3656/* 3657 * Get the root pool information from the root disk, then import the root pool 3658 * during the system boot up time. 3659 */ 3660extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3661 3662static nvlist_t * 3663spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3664{ 3665 nvlist_t *config; 3666 nvlist_t *nvtop, *nvroot; 3667 uint64_t pgid; 3668 3669 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3670 return (NULL); 3671 3672 /* 3673 * Add this top-level vdev to the child array. 3674 */ 3675 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3676 &nvtop) == 0); 3677 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3678 &pgid) == 0); 3679 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3680 3681 /* 3682 * Put this pool's top-level vdevs into a root vdev. 3683 */ 3684 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3685 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3686 VDEV_TYPE_ROOT) == 0); 3687 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3688 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3689 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3690 &nvtop, 1) == 0); 3691 3692 /* 3693 * Replace the existing vdev_tree with the new root vdev in 3694 * this pool's configuration (remove the old, add the new). 3695 */ 3696 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3697 nvlist_free(nvroot); 3698 return (config); 3699} 3700 3701/* 3702 * Walk the vdev tree and see if we can find a device with "better" 3703 * configuration. A configuration is "better" if the label on that 3704 * device has a more recent txg. 3705 */ 3706static void 3707spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3708{ 3709 for (int c = 0; c < vd->vdev_children; c++) 3710 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3711 3712 if (vd->vdev_ops->vdev_op_leaf) { 3713 nvlist_t *label; 3714 uint64_t label_txg; 3715 3716 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3717 &label) != 0) 3718 return; 3719 3720 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 3721 &label_txg) == 0); 3722 3723 /* 3724 * Do we have a better boot device? 3725 */ 3726 if (label_txg > *txg) { 3727 *txg = label_txg; 3728 *avd = vd; 3729 } 3730 nvlist_free(label); 3731 } 3732} 3733 3734/* 3735 * Import a root pool. 3736 * 3737 * For x86. devpath_list will consist of devid and/or physpath name of 3738 * the vdev (e.g. "id1,sd@SSEAGATE..." 
or "/pci@1f,0/ide@d/disk@0,0:a"). 3739 * The GRUB "findroot" command will return the vdev we should boot. 3740 * 3741 * For Sparc, devpath_list consists of the physpath name of the booting device 3742 * no matter whether the rootpool is a single device pool or a mirrored pool. 3743 * e.g. 3744 * "/pci@1f,0/ide@d/disk@0,0:a" 3745 */ 3746int 3747spa_import_rootpool(char *devpath, char *devid) 3748{ 3749 spa_t *spa; 3750 vdev_t *rvd, *bvd, *avd = NULL; 3751 nvlist_t *config, *nvtop; 3752 uint64_t guid, txg; 3753 char *pname; 3754 int error; 3755 3756 /* 3757 * Read the label from the boot device and generate a configuration. 3758 */ 3759 config = spa_generate_rootconf(devpath, devid, &guid); 3760#if defined(_OBP) && defined(_KERNEL) 3761 if (config == NULL) { 3762 if (strstr(devpath, "/iscsi/ssd") != NULL) { 3763 /* iscsi boot */ 3764 get_iscsi_bootpath_phy(devpath); 3765 config = spa_generate_rootconf(devpath, devid, &guid); 3766 } 3767 } 3768#endif 3769 if (config == NULL) { 3770 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 3771 devpath); 3772 return (SET_ERROR(EIO)); 3773 } 3774 3775 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3776 &pname) == 0); 3777 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3778 3779 mutex_enter(&spa_namespace_lock); 3780 if ((spa = spa_lookup(pname)) != NULL) { 3781 /* 3782 * Remove the existing root pool from the namespace so that we 3783 * can replace it with the correct config we just read in. 3784 */ 3785 spa_remove(spa); 3786 } 3787 3788 spa = spa_add(pname, config, NULL); 3789 spa->spa_is_root = B_TRUE; 3790 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 3791 3792 /* 3793 * Build up a vdev tree based on the boot device's label config. 3794 */ 3795 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3796 &nvtop) == 0); 3797 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3798 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 3799 VDEV_ALLOC_ROOTPOOL); 3800 spa_config_exit(spa, SCL_ALL, FTAG); 3801 if (error) { 3802 mutex_exit(&spa_namespace_lock); 3803 nvlist_free(config); 3804 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 3805 pname); 3806 return (error); 3807 } 3808 3809 /* 3810 * Get the boot vdev. 3811 */ 3812 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 3813 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 3814 (u_longlong_t)guid); 3815 error = SET_ERROR(ENOENT); 3816 goto out; 3817 } 3818 3819 /* 3820 * Determine if there is a better boot device. 3821 */ 3822 avd = bvd; 3823 spa_alt_rootvdev(rvd, &avd, &txg); 3824 if (avd != bvd) { 3825 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 3826 "try booting from '%s'", avd->vdev_path); 3827 error = SET_ERROR(EINVAL); 3828 goto out; 3829 } 3830 3831 /* 3832 * If the boot device is part of a spare vdev then ensure that 3833 * we're booting off the active spare. 3834 */ 3835 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3836 !bvd->vdev_isspare) { 3837 cmn_err(CE_NOTE, "The boot device is currently spared. 
Please " 3838 "try booting from '%s'", 3839 bvd->vdev_parent-> 3840 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 3841 error = SET_ERROR(EINVAL); 3842 goto out; 3843 } 3844 3845 error = 0; 3846out: 3847 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3848 vdev_free(rvd); 3849 spa_config_exit(spa, SCL_ALL, FTAG); 3850 mutex_exit(&spa_namespace_lock); 3851 3852 nvlist_free(config); 3853 return (error); 3854} 3855 3856#else 3857 3858extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs, 3859 uint64_t *count); 3860 3861static nvlist_t * 3862spa_generate_rootconf(const char *name) 3863{ 3864 nvlist_t **configs, **tops; 3865 nvlist_t *config; 3866 nvlist_t *best_cfg, *nvtop, *nvroot; 3867 uint64_t *holes; 3868 uint64_t best_txg; 3869 uint64_t nchildren; 3870 uint64_t pgid; 3871 uint64_t count; 3872 uint64_t i; 3873 uint_t nholes; 3874 3875 if (vdev_geom_read_pool_label(name, &configs, &count) != 0) 3876 return (NULL); 3877 3878 ASSERT3U(count, !=, 0); 3879 best_txg = 0; 3880 for (i = 0; i < count; i++) { 3881 uint64_t txg; 3882 3883 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG, 3884 &txg) == 0); 3885 if (txg > best_txg) { 3886 best_txg = txg; 3887 best_cfg = configs[i]; 3888 } 3889 } 3890 3891 /* 3892 * Multi-vdev root pool configuration discovery is not supported yet. 3893 */ 3894 nchildren = 1; 3895 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren); 3896 holes = NULL; 3897 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY, 3898 &holes, &nholes); 3899 3900 tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP); 3901 for (i = 0; i < nchildren; i++) { 3902 if (i >= count) 3903 break; 3904 if (configs[i] == NULL) 3905 continue; 3906 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE, 3907 &nvtop) == 0); 3908 nvlist_dup(nvtop, &tops[i], KM_SLEEP); 3909 } 3910 for (i = 0; holes != NULL && i < nholes; i++) { 3911 if (i >= nchildren) 3912 continue; 3913 if (tops[holes[i]] != NULL) 3914 continue; 3915 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP); 3916 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE, 3917 VDEV_TYPE_HOLE) == 0); 3918 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, 3919 holes[i]) == 0); 3920 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 3921 0) == 0); 3922 } 3923 for (i = 0; i < nchildren; i++) { 3924 if (tops[i] != NULL) 3925 continue; 3926 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP); 3927 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE, 3928 VDEV_TYPE_MISSING) == 0); 3929 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, 3930 i) == 0); 3931 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 3932 0) == 0); 3933 } 3934 3935 /* 3936 * Create pool config based on the best vdev config. 3937 */ 3938 nvlist_dup(best_cfg, &config, KM_SLEEP); 3939 3940 /* 3941 * Put this pool's top-level vdevs into a root vdev. 3942 */ 3943 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3944 &pgid) == 0); 3945 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3946 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3947 VDEV_TYPE_ROOT) == 0); 3948 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3949 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3950 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3951 tops, nchildren) == 0); 3952 3953 /* 3954 * Replace the existing vdev_tree with the new root vdev in 3955 * this pool's configuration (remove the old, add the new). 
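*/

/*
 * Editor's aside (illustrative, not part of the original spa.c): the
 * two loops above synthesize VDEV_TYPE_HOLE and VDEV_TYPE_MISSING
 * entries so that tops[] stays dense and top-level vdev ids keep their
 * positions even when only some children were discovered.  A
 * hypothetical common helper for both loops, compiled out.
 */
#if 0
static nvlist_t *
spa_placeholder_top(const char *type, uint64_t id)
{
	nvlist_t *nvl = fnvlist_alloc();

	fnvlist_add_string(nvl, ZPOOL_CONFIG_TYPE, type);
	fnvlist_add_uint64(nvl, ZPOOL_CONFIG_ID, id);
	fnvlist_add_uint64(nvl, ZPOOL_CONFIG_GUID, 0);
	return (nvl);
}
/* e.g. tops[i] = spa_placeholder_top(VDEV_TYPE_MISSING, i); */
#endif
/*
 * (End of editor's aside; the original listing resumes.)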
3956 */ 3957 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3958 3959 /* 3960 * Drop vdev config elements that should not be present at pool level. 3961 */ 3962 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64); 3963 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64); 3964 3965 for (i = 0; i < count; i++) 3966 nvlist_free(configs[i]); 3967 kmem_free(configs, count * sizeof(void *)); 3968 for (i = 0; i < nchildren; i++) 3969 nvlist_free(tops[i]); 3970 kmem_free(tops, nchildren * sizeof(void *)); 3971 nvlist_free(nvroot); 3972 return (config); 3973} 3974 3975int 3976spa_import_rootpool(const char *name) 3977{ 3978 spa_t *spa; 3979 vdev_t *rvd, *bvd, *avd = NULL; 3980 nvlist_t *config, *nvtop; 3981 uint64_t txg; 3982 char *pname; 3983 int error; 3984 3985 /* 3986 * Read the label from the boot device and generate a configuration. 3987 */ 3988 config = spa_generate_rootconf(name); 3989 3990 mutex_enter(&spa_namespace_lock); 3991 if (config != NULL) { 3992 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3993 &pname) == 0 && strcmp(name, pname) == 0); 3994 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) 3995 == 0); 3996 3997 if ((spa = spa_lookup(pname)) != NULL) { 3998 /* 3999 * Remove the existing root pool from the namespace so 4000 * that we can replace it with the correct config 4001 * we just read in. 4002 */ 4003 spa_remove(spa); 4004 } 4005 spa = spa_add(pname, config, NULL); 4006 4007 /* 4008 * Set spa_ubsync.ub_version as it can be used in vdev_alloc() 4009 * via spa_version(). 4010 */ 4011 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 4012 &spa->spa_ubsync.ub_version) != 0) 4013 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 4014 } else if ((spa = spa_lookup(name)) == NULL) { 4015 mutex_exit(&spa_namespace_lock); 4016 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'", name); 4017 return (SET_ERROR(EIO)); 4018 } else { 4019 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0); 4020 } 4021 spa->spa_is_root = B_TRUE; 4022 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 4023 4024 /* 4025 * Build up a vdev tree based on the boot device's label config. 4026 */ 4027 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4028 &nvtop) == 0); 4029 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4030 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 4031 VDEV_ALLOC_ROOTPOOL); 4032 spa_config_exit(spa, SCL_ALL, FTAG); 4033 if (error) { 4034 mutex_exit(&spa_namespace_lock); 4035 nvlist_free(config); 4036 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 4037 pname); 4038 return (error); 4039 } 4040 4041 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4042 vdev_free(rvd); 4043 spa_config_exit(spa, SCL_ALL, FTAG); 4044 mutex_exit(&spa_namespace_lock); 4045 4046 nvlist_free(config); 4047 return (0); 4048} 4049 4050#endif /* sun */ 4051#endif 4052 4053/* 4054 * Import a non-root pool into the system. 4055 */ 4056int 4057spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 4058{ 4059 spa_t *spa; 4060 char *altroot = NULL; 4061 spa_load_state_t state = SPA_LOAD_IMPORT; 4062 zpool_rewind_policy_t policy; 4063 uint64_t mode = spa_mode_global; 4064 uint64_t readonly = B_FALSE; 4065 int error; 4066 nvlist_t *nvroot; 4067 nvlist_t **spares, **l2cache; 4068 uint_t nspares, nl2cache; 4069 4070 /* 4071 * If a pool with this name exists, return failure. 
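 * The spa_namespace_lock taken below serializes this check against
 * concurrent spa_create()/spa_import()/spa_export() callers, so the
 * lookup and the subsequent spa_add() are atomic with respect to the
 * pool namespace.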
4072 */ 4073 mutex_enter(&spa_namespace_lock); 4074 if (spa_lookup(pool) != NULL) { 4075 mutex_exit(&spa_namespace_lock); 4076 return (SET_ERROR(EEXIST)); 4077 } 4078 4079 /* 4080 * Create and initialize the spa structure. 4081 */ 4082 (void) nvlist_lookup_string(props, 4083 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4084 (void) nvlist_lookup_uint64(props, 4085 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 4086 if (readonly) 4087 mode = FREAD; 4088 spa = spa_add(pool, config, altroot); 4089 spa->spa_import_flags = flags; 4090 4091 /* 4092 * Verbatim import - Take a pool and insert it into the namespace 4093 * as if it had been loaded at boot. 4094 */ 4095 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 4096 if (props != NULL) 4097 spa_configfile_set(spa, props, B_FALSE); 4098 4099 spa_config_sync(spa, B_FALSE, B_TRUE); 4100 4101 mutex_exit(&spa_namespace_lock); 4102 return (0); 4103 } 4104 4105 spa_activate(spa, mode); 4106 4107 /* 4108 * Don't start async tasks until we know everything is healthy. 4109 */ 4110 spa_async_suspend(spa); 4111 4112 zpool_get_rewind_policy(config, &policy); 4113 if (policy.zrp_request & ZPOOL_DO_REWIND) 4114 state = SPA_LOAD_RECOVER; 4115 4116 /* 4117 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 4118 * because the user-supplied config is actually the one to trust when 4119 * doing an import. 4120 */ 4121 if (state != SPA_LOAD_RECOVER) 4122 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4123 4124 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 4125 policy.zrp_request); 4126 4127 /* 4128 * Propagate anything learned while loading the pool and pass it 4129 * back to caller (i.e. rewind info, missing devices, etc). 4130 */ 4131 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4132 spa->spa_load_info) == 0); 4133 4134 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4135 /* 4136 * Toss any existing sparelist, as it doesn't have any validity 4137 * anymore, and conflicts with spa_has_spare(). 4138 */ 4139 if (spa->spa_spares.sav_config) { 4140 nvlist_free(spa->spa_spares.sav_config); 4141 spa->spa_spares.sav_config = NULL; 4142 spa_load_spares(spa); 4143 } 4144 if (spa->spa_l2cache.sav_config) { 4145 nvlist_free(spa->spa_l2cache.sav_config); 4146 spa->spa_l2cache.sav_config = NULL; 4147 spa_load_l2cache(spa); 4148 } 4149 4150 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4151 &nvroot) == 0); 4152 if (error == 0) 4153 error = spa_validate_aux(spa, nvroot, -1ULL, 4154 VDEV_ALLOC_SPARE); 4155 if (error == 0) 4156 error = spa_validate_aux(spa, nvroot, -1ULL, 4157 VDEV_ALLOC_L2CACHE); 4158 spa_config_exit(spa, SCL_ALL, FTAG); 4159 4160 if (props != NULL) 4161 spa_configfile_set(spa, props, B_FALSE); 4162 4163 if (error != 0 || (props && spa_writeable(spa) && 4164 (error = spa_prop_set(spa, props)))) { 4165 spa_unload(spa); 4166 spa_deactivate(spa); 4167 spa_remove(spa); 4168 mutex_exit(&spa_namespace_lock); 4169 return (error); 4170 } 4171 4172 spa_async_resume(spa); 4173 4174 /* 4175 * Override any spares and level 2 cache devices as specified by 4176 * the user, as these may have correct device names/devids, etc. 
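 * The label-derived aux lists were discarded above, so the arrays found
 * in the caller-supplied nvroot simply become the new sav_config
 * contents before the spares and l2cache devices are reloaded.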
4177 */ 4178 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 4179 &spares, &nspares) == 0) { 4180 if (spa->spa_spares.sav_config) 4181 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 4182 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 4183 else 4184 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 4185 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4186 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 4187 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4188 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4189 spa_load_spares(spa); 4190 spa_config_exit(spa, SCL_ALL, FTAG); 4191 spa->spa_spares.sav_sync = B_TRUE; 4192 } 4193 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4194 &l2cache, &nl2cache) == 0) { 4195 if (spa->spa_l2cache.sav_config) 4196 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 4197 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 4198 else 4199 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4200 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4201 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4202 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4203 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4204 spa_load_l2cache(spa); 4205 spa_config_exit(spa, SCL_ALL, FTAG); 4206 spa->spa_l2cache.sav_sync = B_TRUE; 4207 } 4208 4209 /* 4210 * Check for any removed devices. 4211 */ 4212 if (spa->spa_autoreplace) { 4213 spa_aux_check_removed(&spa->spa_spares); 4214 spa_aux_check_removed(&spa->spa_l2cache); 4215 } 4216 4217 if (spa_writeable(spa)) { 4218 /* 4219 * Update the config cache to include the newly-imported pool. 4220 */ 4221 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4222 } 4223 4224 /* 4225 * It's possible that the pool was expanded while it was exported. 4226 * We kick off an async task to handle this for us. 4227 */ 4228 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4229 4230 mutex_exit(&spa_namespace_lock); 4231 spa_history_log_version(spa, "import"); 4232 4233#ifdef __FreeBSD__ 4234#ifdef _KERNEL 4235 zvol_create_minors(pool); 4236#endif 4237#endif 4238 return (0); 4239} 4240 4241nvlist_t * 4242spa_tryimport(nvlist_t *tryconfig) 4243{ 4244 nvlist_t *config = NULL; 4245 char *poolname; 4246 spa_t *spa; 4247 uint64_t state; 4248 int error; 4249 4250 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4251 return (NULL); 4252 4253 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4254 return (NULL); 4255 4256 /* 4257 * Create and initialize the spa structure. 4258 */ 4259 mutex_enter(&spa_namespace_lock); 4260 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4261 spa_activate(spa, FREAD); 4262 4263 /* 4264 * Pass off the heavy lifting to spa_load(). 4265 * Pass TRUE for mosconfig because the user-supplied config 4266 * is actually the one to trust when doing an import. 4267 */ 4268 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4269 4270 /* 4271 * If 'tryconfig' was at least parsable, return the current config. 
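 * Even if spa_load() failed, a non-NULL spa_root_vdev means enough label
 * state was understood to generate a config that userland can inspect
 * (e.g. to display pool status before attempting a real import).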
4272 */ 4273 if (spa->spa_root_vdev != NULL) { 4274 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4275 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4276 poolname) == 0); 4277 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4278 state) == 0); 4279 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4280 spa->spa_uberblock.ub_timestamp) == 0); 4281 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4282 spa->spa_load_info) == 0); 4283 4284 /* 4285 * If the bootfs property exists on this pool then we 4286 * copy it out so that external consumers can tell which 4287 * pools are bootable. 4288 */ 4289 if ((!error || error == EEXIST) && spa->spa_bootfs) { 4290 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4291 4292 /* 4293 * We have to play games with the name since the 4294 * pool was opened as TRYIMPORT_NAME. 4295 */ 4296 if (dsl_dsobj_to_dsname(spa_name(spa), 4297 spa->spa_bootfs, tmpname) == 0) { 4298 char *cp; 4299 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4300 4301 cp = strchr(tmpname, '/'); 4302 if (cp == NULL) { 4303 (void) strlcpy(dsname, tmpname, 4304 MAXPATHLEN); 4305 } else { 4306 (void) snprintf(dsname, MAXPATHLEN, 4307 "%s/%s", poolname, ++cp); 4308 } 4309 VERIFY(nvlist_add_string(config, 4310 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4311 kmem_free(dsname, MAXPATHLEN); 4312 } 4313 kmem_free(tmpname, MAXPATHLEN); 4314 } 4315 4316 /* 4317 * Add the list of hot spares and level 2 cache devices. 4318 */ 4319 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4320 spa_add_spares(spa, config); 4321 spa_add_l2cache(spa, config); 4322 spa_config_exit(spa, SCL_CONFIG, FTAG); 4323 } 4324 4325 spa_unload(spa); 4326 spa_deactivate(spa); 4327 spa_remove(spa); 4328 mutex_exit(&spa_namespace_lock); 4329 4330 return (config); 4331} 4332 4333/* 4334 * Pool export/destroy 4335 * 4336 * The act of destroying or exporting a pool is very simple. We make sure there 4337 * is no more pending I/O and any references to the pool are gone. Then, we 4338 * update the pool state and sync all the labels to disk, removing the 4339 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4340 * we don't sync the labels or remove the configuration cache. 4341 */ 4342static int 4343spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4344 boolean_t force, boolean_t hardforce) 4345{ 4346 spa_t *spa; 4347 4348 if (oldconfig) 4349 *oldconfig = NULL; 4350 4351 if (!(spa_mode_global & FWRITE)) 4352 return (SET_ERROR(EROFS)); 4353 4354 mutex_enter(&spa_namespace_lock); 4355 if ((spa = spa_lookup(pool)) == NULL) { 4356 mutex_exit(&spa_namespace_lock); 4357 return (SET_ERROR(ENOENT)); 4358 } 4359 4360 /* 4361 * Put a hold on the pool, drop the namespace lock, stop async tasks, 4362 * reacquire the namespace lock, and see if we can export. 4363 */ 4364 spa_open_ref(spa, FTAG); 4365 mutex_exit(&spa_namespace_lock); 4366 spa_async_suspend(spa); 4367 mutex_enter(&spa_namespace_lock); 4368 spa_close(spa, FTAG); 4369 4370 /* 4371 * The pool will be in core if it's openable, 4372 * in which case we can modify its state. 4373 */ 4374 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 4375 /* 4376 * Objsets may be open only because they're dirty, so we 4377 * have to force it to sync before checking spa_refcnt. 4378 */ 4379 txg_wait_synced(spa->spa_dsl_pool, 0); 4380 4381 /* 4382 * A pool cannot be exported or destroyed if there are active 4383 * references. 
If we are resetting a pool, allow references by 4384 * fault injection handlers. 4385 */ 4386 if (!spa_refcount_zero(spa) || 4387 (spa->spa_inject_ref != 0 && 4388 new_state != POOL_STATE_UNINITIALIZED)) { 4389 spa_async_resume(spa); 4390 mutex_exit(&spa_namespace_lock); 4391 return (SET_ERROR(EBUSY)); 4392 } 4393 4394 /* 4395 * A pool cannot be exported if it has an active shared spare. 4396 * This is to prevent other pools from stealing the active spare 4397 * from an exported pool. The user can still force the export 4398 * if desired. 4399 */ 4400 if (!force && new_state == POOL_STATE_EXPORTED && 4401 spa_has_active_shared_spare(spa)) { 4402 spa_async_resume(spa); 4403 mutex_exit(&spa_namespace_lock); 4404 return (SET_ERROR(EXDEV)); 4405 } 4406 4407 /* 4408 * We want this to be reflected on every label, 4409 * so mark them all dirty. spa_unload() will do the 4410 * final sync that pushes these changes out. 4411 */ 4412 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4413 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4414 spa->spa_state = new_state; 4415 spa->spa_final_txg = spa_last_synced_txg(spa) + 4416 TXG_DEFER_SIZE + 1; 4417 vdev_config_dirty(spa->spa_root_vdev); 4418 spa_config_exit(spa, SCL_ALL, FTAG); 4419 } 4420 } 4421 4422 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4423 4424 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4425 spa_unload(spa); 4426 spa_deactivate(spa); 4427 } 4428 4429 if (oldconfig && spa->spa_config) 4430 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4431 4432 if (new_state != POOL_STATE_UNINITIALIZED) { 4433 if (!hardforce) 4434 spa_config_sync(spa, B_TRUE, B_TRUE); 4435 spa_remove(spa); 4436 } 4437 mutex_exit(&spa_namespace_lock); 4438 4439 return (0); 4440} 4441 4442/* 4443 * Destroy a storage pool. 4444 */ 4445int 4446spa_destroy(char *pool) 4447{ 4448 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4449 B_FALSE, B_FALSE)); 4450} 4451 4452/* 4453 * Export a storage pool. 4454 */ 4455int 4456spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4457 boolean_t hardforce) 4458{ 4459 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4460 force, hardforce)); 4461} 4462 4463/* 4464 * Similar to spa_export(), this unloads the spa_t without actually removing it 4465 * from the namespace in any way. 4466 */ 4467int 4468spa_reset(char *pool) 4469{ 4470 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4471 B_FALSE, B_FALSE)); 4472} 4473 4474/* 4475 * ========================================================================== 4476 * Device manipulation 4477 * ========================================================================== 4478 */ 4479 4480/* 4481 * Add a device to a storage pool. 
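 *
 * The caller supplies an nvroot describing only the new devices: a root
 * vdev whose ZPOOL_CONFIG_CHILDREN are the new top-level vdevs, plus
 * optional ZPOOL_CONFIG_SPARES and ZPOOL_CONFIG_L2CACHE arrays. A rough
 * sketch of the expected layout (illustrative, not literal code):
 *
 *	root
 *	  children[0] = mirror(diskA, diskB)
 *	  spares[]    = { diskC }
 *	  l2cache[]   = { diskD }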
4482 */ 4483int 4484spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4485{ 4486 uint64_t txg, id; 4487 int error; 4488 vdev_t *rvd = spa->spa_root_vdev; 4489 vdev_t *vd, *tvd; 4490 nvlist_t **spares, **l2cache; 4491 uint_t nspares, nl2cache; 4492 4493 ASSERT(spa_writeable(spa)); 4494 4495 txg = spa_vdev_enter(spa); 4496 4497 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4498 VDEV_ALLOC_ADD)) != 0) 4499 return (spa_vdev_exit(spa, NULL, txg, error)); 4500 4501 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4502 4503 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4504 &nspares) != 0) 4505 nspares = 0; 4506 4507 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4508 &nl2cache) != 0) 4509 nl2cache = 0; 4510 4511 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4512 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4513 4514 if (vd->vdev_children != 0 && 4515 (error = vdev_create(vd, txg, B_FALSE)) != 0) 4516 return (spa_vdev_exit(spa, vd, txg, error)); 4517 4518 /* 4519 * We must validate the spares and l2cache devices after checking the 4520 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 4521 */ 4522 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4523 return (spa_vdev_exit(spa, vd, txg, error)); 4524 4525 /* 4526 * Transfer each new top-level vdev from vd to rvd. 4527 */ 4528 for (int c = 0; c < vd->vdev_children; c++) { 4529 4530 /* 4531 * Set the vdev id to the first hole, if one exists. 4532 */ 4533 for (id = 0; id < rvd->vdev_children; id++) { 4534 if (rvd->vdev_child[id]->vdev_ishole) { 4535 vdev_free(rvd->vdev_child[id]); 4536 break; 4537 } 4538 } 4539 tvd = vd->vdev_child[c]; 4540 vdev_remove_child(vd, tvd); 4541 tvd->vdev_id = id; 4542 vdev_add_child(rvd, tvd); 4543 vdev_config_dirty(tvd); 4544 } 4545 4546 if (nspares != 0) { 4547 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4548 ZPOOL_CONFIG_SPARES); 4549 spa_load_spares(spa); 4550 spa->spa_spares.sav_sync = B_TRUE; 4551 } 4552 4553 if (nl2cache != 0) { 4554 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4555 ZPOOL_CONFIG_L2CACHE); 4556 spa_load_l2cache(spa); 4557 spa->spa_l2cache.sav_sync = B_TRUE; 4558 } 4559 4560 /* 4561 * We have to be careful when adding new vdevs to an existing pool. 4562 * If other threads start allocating from these vdevs before we 4563 * sync the config cache, and we lose power, then upon reboot we may 4564 * fail to open the pool because there are DVAs that the config cache 4565 * can't translate. Therefore, we first add the vdevs without 4566 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4567 * and then let spa_config_update() initialize the new metaslabs. 4568 * 4569 * spa_load() checks for added-but-not-initialized vdevs, so that 4570 * if we lose power at any point in this sequence, the remaining 4571 * steps will be completed the next time we load the pool. 4572 */ 4573 (void) spa_vdev_exit(spa, vd, txg, 0); 4574 4575 mutex_enter(&spa_namespace_lock); 4576 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4577 mutex_exit(&spa_namespace_lock); 4578 4579 return (0); 4580} 4581 4582/* 4583 * Attach a device to a mirror. The arguments are the path to any device 4584 * in the mirror, and the nvroot for the new device. If the path specifies 4585 * a device that is not mirrored, we automatically insert the mirror vdev. 
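 * (For example, attaching a disk to a plain single-disk top-level vdev
 * converts it in place into a two-way mirror.)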
4586 * 4587 * If 'replacing' is specified, the new device is intended to replace the 4588 * existing device; in this case the two devices are made into their own 4589 * mirror using the 'replacing' vdev, which is functionally identical to 4590 * the mirror vdev (it actually reuses all the same ops) but has a few 4591 * extra rules: you can't attach to it after it's been created, and upon 4592 * completion of resilvering, the first disk (the one being replaced) 4593 * is automatically detached. 4594 */ 4595int 4596spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4597{ 4598 uint64_t txg, dtl_max_txg; 4599 vdev_t *rvd = spa->spa_root_vdev; 4600 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4601 vdev_ops_t *pvops; 4602 char *oldvdpath, *newvdpath; 4603 int newvd_isspare; 4604 int error; 4605 4606 ASSERT(spa_writeable(spa)); 4607 4608 txg = spa_vdev_enter(spa); 4609 4610 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4611 4612 if (oldvd == NULL) 4613 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4614 4615 if (!oldvd->vdev_ops->vdev_op_leaf) 4616 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4617 4618 pvd = oldvd->vdev_parent; 4619 4620 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4621 VDEV_ALLOC_ATTACH)) != 0) 4622 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4623 4624 if (newrootvd->vdev_children != 1) 4625 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4626 4627 newvd = newrootvd->vdev_child[0]; 4628 4629 if (!newvd->vdev_ops->vdev_op_leaf) 4630 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4631 4632 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4633 return (spa_vdev_exit(spa, newrootvd, txg, error)); 4634 4635 /* 4636 * Spares can't replace logs 4637 */ 4638 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4639 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4640 4641 if (!replacing) { 4642 /* 4643 * For attach, the only allowable parent is a mirror or the root 4644 * vdev. 4645 */ 4646 if (pvd->vdev_ops != &vdev_mirror_ops && 4647 pvd->vdev_ops != &vdev_root_ops) 4648 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4649 4650 pvops = &vdev_mirror_ops; 4651 } else { 4652 /* 4653 * Active hot spares can only be replaced by inactive hot 4654 * spares. 4655 */ 4656 if (pvd->vdev_ops == &vdev_spare_ops && 4657 oldvd->vdev_isspare && 4658 !spa_has_spare(spa, newvd->vdev_guid)) 4659 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4660 4661 /* 4662 * If the source is a hot spare, and the parent isn't already a 4663 * spare, then we want to create a new hot spare. Otherwise, we 4664 * want to create a replacing vdev. The user is not allowed to 4665 * attach to a spared vdev child unless the 'isspare' state is 4666 * the same (spare replaces spare, non-spare replaces 4667 * non-spare). 4668 */ 4669 if (pvd->vdev_ops == &vdev_replacing_ops && 4670 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4671 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4672 } else if (pvd->vdev_ops == &vdev_spare_ops && 4673 newvd->vdev_isspare != oldvd->vdev_isspare) { 4674 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4675 } 4676 4677 if (newvd->vdev_isspare) 4678 pvops = &vdev_spare_ops; 4679 else 4680 pvops = &vdev_replacing_ops; 4681 } 4682 4683 /* 4684 * Make sure the new device is big enough. 
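 * vdev_get_min_asize() rounds the allocatable size to a metaslab
 * boundary, so a replacement may be slightly smaller than the original
 * device as long as it can satisfy the same number of allocations.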
4685 */ 4686 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4687 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4688 4689 /* 4690 * The new device cannot have a higher alignment requirement 4691 * than the top-level vdev. 4692 */ 4693 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4694 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4695 4696 /* 4697 * If this is an in-place replacement, update oldvd's path and devid 4698 * to make it distinguishable from newvd, and unopenable from now on. 4699 */ 4700 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4701 spa_strfree(oldvd->vdev_path); 4702 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4703 KM_SLEEP); 4704 (void) sprintf(oldvd->vdev_path, "%s/%s", 4705 newvd->vdev_path, "old"); 4706 if (oldvd->vdev_devid != NULL) { 4707 spa_strfree(oldvd->vdev_devid); 4708 oldvd->vdev_devid = NULL; 4709 } 4710 } 4711 4712 /* mark the device being resilvered */ 4713 newvd->vdev_resilver_txg = txg; 4714 4715 /* 4716 * If the parent is not a mirror, or if we're replacing, insert the new 4717 * mirror/replacing/spare vdev above oldvd. 4718 */ 4719 if (pvd->vdev_ops != pvops) 4720 pvd = vdev_add_parent(oldvd, pvops); 4721 4722 ASSERT(pvd->vdev_top->vdev_parent == rvd); 4723 ASSERT(pvd->vdev_ops == pvops); 4724 ASSERT(oldvd->vdev_parent == pvd); 4725 4726 /* 4727 * Extract the new device from its root and add it to pvd. 4728 */ 4729 vdev_remove_child(newrootvd, newvd); 4730 newvd->vdev_id = pvd->vdev_children; 4731 newvd->vdev_crtxg = oldvd->vdev_crtxg; 4732 vdev_add_child(pvd, newvd); 4733 4734 tvd = newvd->vdev_top; 4735 ASSERT(pvd->vdev_top == tvd); 4736 ASSERT(tvd->vdev_parent == rvd); 4737 4738 vdev_config_dirty(tvd); 4739 4740 /* 4741 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 4742 * for any dmu_sync-ed blocks. It will propagate upward when 4743 * spa_vdev_exit() calls vdev_dtl_reassess(). 4744 */ 4745 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4746 4747 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 4748 dtl_max_txg - TXG_INITIAL); 4749 4750 if (newvd->vdev_isspare) { 4751 spa_spare_activate(newvd); 4752 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 4753 } 4754 4755 oldvdpath = spa_strdup(oldvd->vdev_path); 4756 newvdpath = spa_strdup(newvd->vdev_path); 4757 newvd_isspare = newvd->vdev_isspare; 4758 4759 /* 4760 * Mark newvd's DTL dirty in this txg. 4761 */ 4762 vdev_dirty(tvd, VDD_DTL, newvd, txg); 4763 4764 /* 4765 * Restart the resilver 4766 */ 4767 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4768 4769 /* 4770 * Commit the config 4771 */ 4772 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 4773 4774 spa_history_log_internal(spa, "vdev attach", NULL, 4775 "%s vdev=%s %s vdev=%s", 4776 replacing && newvd_isspare ? "spare in" : 4777 replacing ? "replace" : "attach", newvdpath, 4778 replacing ? "for" : "to", oldvdpath); 4779 4780 spa_strfree(oldvdpath); 4781 spa_strfree(newvdpath); 4782 4783 if (spa->spa_bootfs) 4784 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 4785 4786 return (0); 4787} 4788 4789/* 4790 * Detach a device from a mirror or replacing vdev. 4791 * 4792 * If 'replace_done' is specified, only detach if the parent 4793 * is a replacing vdev. 
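 * (spa_vdev_resilver_done() passes replace_done == B_TRUE so that the
 * async completion path never detaches an ordinary mirror child.)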
4794 */ 4795int 4796spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4797{ 4798 uint64_t txg; 4799 int error; 4800 vdev_t *rvd = spa->spa_root_vdev; 4801 vdev_t *vd, *pvd, *cvd, *tvd; 4802 boolean_t unspare = B_FALSE; 4803 uint64_t unspare_guid = 0; 4804 char *vdpath; 4805 4806 ASSERT(spa_writeable(spa)); 4807 4808 txg = spa_vdev_enter(spa); 4809 4810 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4811 4812 if (vd == NULL) 4813 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4814 4815 if (!vd->vdev_ops->vdev_op_leaf) 4816 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4817 4818 pvd = vd->vdev_parent; 4819 4820 /* 4821 * If the parent/child relationship is not as expected, don't do it. 4822 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 4823 * vdev that's replacing B with C. The user's intent in replacing 4824 * is to go from M(A,B) to M(A,C). If the user decides to cancel 4825 * the replace by detaching C, the expected behavior is to end up 4826 * M(A,B). But suppose that right after deciding to detach C, 4827 * the replacement of B completes. We would have M(A,C), and then 4828 * ask to detach C, which would leave us with just A -- not what 4829 * the user wanted. To prevent this, we make sure that the 4830 * parent/child relationship hasn't changed -- in this example, 4831 * that C's parent is still the replacing vdev R. 4832 */ 4833 if (pvd->vdev_guid != pguid && pguid != 0) 4834 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4835 4836 /* 4837 * Only 'replacing' or 'spare' vdevs can be replaced. 4838 */ 4839 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4840 pvd->vdev_ops != &vdev_spare_ops) 4841 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4842 4843 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4844 spa_version(spa) >= SPA_VERSION_SPARES); 4845 4846 /* 4847 * Only mirror, replacing, and spare vdevs support detach. 4848 */ 4849 if (pvd->vdev_ops != &vdev_replacing_ops && 4850 pvd->vdev_ops != &vdev_mirror_ops && 4851 pvd->vdev_ops != &vdev_spare_ops) 4852 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4853 4854 /* 4855 * If this device has the only valid copy of some data, 4856 * we cannot safely detach it. 4857 */ 4858 if (vdev_dtl_required(vd)) 4859 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4860 4861 ASSERT(pvd->vdev_children >= 2); 4862 4863 /* 4864 * If we are detaching the second disk from a replacing vdev, then 4865 * check to see if we changed the original vdev's path to have "/old" 4866 * at the end in spa_vdev_attach(). If so, undo that change now. 4867 */ 4868 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 4869 vd->vdev_path != NULL) { 4870 size_t len = strlen(vd->vdev_path); 4871 4872 for (int c = 0; c < pvd->vdev_children; c++) { 4873 cvd = pvd->vdev_child[c]; 4874 4875 if (cvd == vd || cvd->vdev_path == NULL) 4876 continue; 4877 4878 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 4879 strcmp(cvd->vdev_path + len, "/old") == 0) { 4880 spa_strfree(cvd->vdev_path); 4881 cvd->vdev_path = spa_strdup(vd->vdev_path); 4882 break; 4883 } 4884 } 4885 } 4886 4887 /* 4888 * If we are detaching the original disk from a spare, then it implies 4889 * that the spare should become a real disk, and be removed from the 4890 * active spare list for the pool. 4891 */ 4892 if (pvd->vdev_ops == &vdev_spare_ops && 4893 vd->vdev_id == 0 && 4894 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 4895 unspare = B_TRUE; 4896 4897 /* 4898 * Erase the disk labels so the disk can be used for other things. 
4899 * This must be done after all other error cases are handled, 4900 * but before we disembowel vd (so we can still do I/O to it). 4901 * But if we can't do it, don't treat the error as fatal -- 4902 * it may be that the unwritability of the disk is the reason 4903 * it's being detached! 4904 */ 4905 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 4906 4907 /* 4908 * Remove vd from its parent and compact the parent's children. 4909 */ 4910 vdev_remove_child(pvd, vd); 4911 vdev_compact_children(pvd); 4912 4913 /* 4914 * Remember one of the remaining children so we can get tvd below. 4915 */ 4916 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 4917 4918 /* 4919 * If we need to remove the remaining child from the list of hot spares, 4920 * do it now, marking the vdev as no longer a spare in the process. 4921 * We must do this before vdev_remove_parent(), because that can 4922 * change the GUID if it creates a new toplevel GUID. For a similar 4923 * reason, we must remove the spare now, in the same txg as the detach; 4924 * otherwise someone could attach a new sibling, change the GUID, and 4925 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 4926 */ 4927 if (unspare) { 4928 ASSERT(cvd->vdev_isspare); 4929 spa_spare_remove(cvd); 4930 unspare_guid = cvd->vdev_guid; 4931 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 4932 cvd->vdev_unspare = B_TRUE; 4933 } 4934 4935 /* 4936 * If the parent mirror/replacing vdev only has one child, 4937 * the parent is no longer needed. Remove it from the tree. 4938 */ 4939 if (pvd->vdev_children == 1) { 4940 if (pvd->vdev_ops == &vdev_spare_ops) 4941 cvd->vdev_unspare = B_FALSE; 4942 vdev_remove_parent(cvd); 4943 } 4944 4945 4946 /* 4947 * We don't set tvd until now because the parent we just removed 4948 * may have been the previous top-level vdev. 4949 */ 4950 tvd = cvd->vdev_top; 4951 ASSERT(tvd->vdev_parent == rvd); 4952 4953 /* 4954 * Reevaluate the parent vdev state. 4955 */ 4956 vdev_propagate_state(cvd); 4957 4958 /* 4959 * If the 'autoexpand' property is set on the pool then automatically 4960 * try to expand the size of the pool. For example if the device we 4961 * just detached was smaller than the others, it may be possible to 4962 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 4963 * first so that we can obtain the updated sizes of the leaf vdevs. 4964 */ 4965 if (spa->spa_autoexpand) { 4966 vdev_reopen(tvd); 4967 vdev_expand(tvd, txg); 4968 } 4969 4970 vdev_config_dirty(tvd); 4971 4972 /* 4973 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 4974 * vd->vdev_detached is set and free vd's DTL object in syncing context. 4975 * But first make sure we're not on any *other* txg's DTL list, to 4976 * prevent vd from being accessed after it's freed. 4977 */ 4978 vdpath = spa_strdup(vd->vdev_path); 4979 for (int t = 0; t < TXG_SIZE; t++) 4980 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 4981 vd->vdev_detached = B_TRUE; 4982 vdev_dirty(tvd, VDD_DTL, vd, txg); 4983 4984 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 4985 4986 /* hang on to the spa before we release the lock */ 4987 spa_open_ref(spa, FTAG); 4988 4989 error = spa_vdev_exit(spa, vd, txg, 0); 4990 4991 spa_history_log_internal(spa, "detach", NULL, 4992 "vdev=%s", vdpath); 4993 spa_strfree(vdpath); 4994 4995 /* 4996 * If this was the removal of the original device in a hot spare vdev, 4997 * then we want to go through and remove the device from the hot spare 4998 * list of every other pool. 
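 * (Hot spares may be shared between pools, so each active spa in the
 * namespace is visited below and asked to drop the spare as well.)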
4999 */ 5000 if (unspare) { 5001 spa_t *altspa = NULL; 5002 5003 mutex_enter(&spa_namespace_lock); 5004 while ((altspa = spa_next(altspa)) != NULL) { 5005 if (altspa->spa_state != POOL_STATE_ACTIVE || 5006 altspa == spa) 5007 continue; 5008 5009 spa_open_ref(altspa, FTAG); 5010 mutex_exit(&spa_namespace_lock); 5011 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 5012 mutex_enter(&spa_namespace_lock); 5013 spa_close(altspa, FTAG); 5014 } 5015 mutex_exit(&spa_namespace_lock); 5016 5017 /* search the rest of the vdevs for spares to remove */ 5018 spa_vdev_resilver_done(spa); 5019 } 5020 5021 /* all done with the spa; OK to release */ 5022 mutex_enter(&spa_namespace_lock); 5023 spa_close(spa, FTAG); 5024 mutex_exit(&spa_namespace_lock); 5025 5026 return (error); 5027} 5028 5029/* 5030 * Split a set of devices from their mirrors, and create a new pool from them. 5031 */ 5032int 5033spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 5034 nvlist_t *props, boolean_t exp) 5035{ 5036 int error = 0; 5037 uint64_t txg, *glist; 5038 spa_t *newspa; 5039 uint_t c, children, lastlog; 5040 nvlist_t **child, *nvl, *tmp; 5041 dmu_tx_t *tx; 5042 char *altroot = NULL; 5043 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 5044 boolean_t activate_slog; 5045 5046 ASSERT(spa_writeable(spa)); 5047 5048 txg = spa_vdev_enter(spa); 5049 5050 /* clear the log and flush everything up to now */ 5051 activate_slog = spa_passivate_log(spa); 5052 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5053 error = spa_offline_log(spa); 5054 txg = spa_vdev_config_enter(spa); 5055 5056 if (activate_slog) 5057 spa_activate_log(spa); 5058 5059 if (error != 0) 5060 return (spa_vdev_exit(spa, NULL, txg, error)); 5061 5062 /* check new spa name before going any further */ 5063 if (spa_lookup(newname) != NULL) 5064 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 5065 5066 /* 5067 * scan through all the children to ensure they're all mirrors 5068 */ 5069 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 5070 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 5071 &children) != 0) 5072 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5073 5074 /* first, check to ensure we've got the right child count */ 5075 rvd = spa->spa_root_vdev; 5076 lastlog = 0; 5077 for (c = 0; c < rvd->vdev_children; c++) { 5078 vdev_t *vd = rvd->vdev_child[c]; 5079 5080 /* don't count the holes & logs as children */ 5081 if (vd->vdev_islog || vd->vdev_ishole) { 5082 if (lastlog == 0) 5083 lastlog = c; 5084 continue; 5085 } 5086 5087 lastlog = 0; 5088 } 5089 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 5090 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5091 5092 /* next, ensure no spare or cache devices are part of the split */ 5093 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 5094 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 5095 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5096 5097 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 5098 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 5099 5100 /* then, loop over each vdev and validate it */ 5101 for (c = 0; c < children; c++) { 5102 uint64_t is_hole = 0; 5103 5104 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 5105 &is_hole); 5106 5107 if (is_hole != 0) { 5108 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 5109 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 5110 continue; 5111 } else { 5112 error = SET_ERROR(EINVAL); 5113 break; 5114 } 5115 } 5116 5117 /* which disk is going to be split? */ 5118 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 5119 &glist[c]) != 0) { 5120 error = SET_ERROR(EINVAL); 5121 break; 5122 } 5123 5124 /* look it up in the spa */ 5125 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 5126 if (vml[c] == NULL) { 5127 error = SET_ERROR(ENODEV); 5128 break; 5129 } 5130 5131 /* make sure there's nothing stopping the split */ 5132 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 5133 vml[c]->vdev_islog || 5134 vml[c]->vdev_ishole || 5135 vml[c]->vdev_isspare || 5136 vml[c]->vdev_isl2cache || 5137 !vdev_writeable(vml[c]) || 5138 vml[c]->vdev_children != 0 || 5139 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 5140 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 5141 error = SET_ERROR(EINVAL); 5142 break; 5143 } 5144 5145 if (vdev_dtl_required(vml[c])) { 5146 error = SET_ERROR(EBUSY); 5147 break; 5148 } 5149 5150 /* we need certain info from the top level */ 5151 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 5152 vml[c]->vdev_top->vdev_ms_array) == 0); 5153 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 5154 vml[c]->vdev_top->vdev_ms_shift) == 0); 5155 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 5156 vml[c]->vdev_top->vdev_asize) == 0); 5157 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 5158 vml[c]->vdev_top->vdev_ashift) == 0); 5159 } 5160 5161 if (error != 0) { 5162 kmem_free(vml, children * sizeof (vdev_t *)); 5163 kmem_free(glist, children * sizeof (uint64_t)); 5164 return (spa_vdev_exit(spa, NULL, txg, error)); 5165 } 5166 5167 /* stop writers from using the disks */ 5168 for (c = 0; c < children; c++) { 5169 if (vml[c] != NULL) 5170 vml[c]->vdev_offline = B_TRUE; 5171 } 5172 vdev_reopen(spa->spa_root_vdev); 5173 5174 /* 5175 * Temporarily record the splitting vdevs in the spa config. This 5176 * will disappear once the config is regenerated. 5177 */ 5178 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5179 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 5180 glist, children) == 0); 5181 kmem_free(glist, children * sizeof (uint64_t)); 5182 5183 mutex_enter(&spa->spa_props_lock); 5184 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 5185 nvl) == 0); 5186 mutex_exit(&spa->spa_props_lock); 5187 spa->spa_config_splitting = nvl; 5188 vdev_config_dirty(spa->spa_root_vdev); 5189 5190 /* configure and create the new pool */ 5191 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 5192 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5193 exp ? 
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 5194 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5195 spa_version(spa)) == 0); 5196 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 5197 spa->spa_config_txg) == 0); 5198 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5199 spa_generate_guid(NULL)) == 0); 5200 (void) nvlist_lookup_string(props, 5201 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5202 5203 /* add the new pool to the namespace */ 5204 newspa = spa_add(newname, config, altroot); 5205 newspa->spa_config_txg = spa->spa_config_txg; 5206 spa_set_log_state(newspa, SPA_LOG_CLEAR); 5207 5208 /* release the spa config lock, retaining the namespace lock */ 5209 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5210 5211 if (zio_injection_enabled) 5212 zio_handle_panic_injection(spa, FTAG, 1); 5213 5214 spa_activate(newspa, spa_mode_global); 5215 spa_async_suspend(newspa); 5216 5217#ifndef sun 5218 /* mark that we are creating new spa by splitting */ 5219 newspa->spa_splitting_newspa = B_TRUE; 5220#endif 5221 /* create the new pool from the disks of the original pool */ 5222 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 5223#ifndef sun 5224 newspa->spa_splitting_newspa = B_FALSE; 5225#endif 5226 if (error) 5227 goto out; 5228 5229 /* if that worked, generate a real config for the new pool */ 5230 if (newspa->spa_root_vdev != NULL) { 5231 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5232 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5233 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5234 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5235 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5236 B_TRUE)); 5237 } 5238 5239 /* set the props */ 5240 if (props != NULL) { 5241 spa_configfile_set(newspa, props, B_FALSE); 5242 error = spa_prop_set(newspa, props); 5243 if (error) 5244 goto out; 5245 } 5246 5247 /* flush everything */ 5248 txg = spa_vdev_config_enter(newspa); 5249 vdev_config_dirty(newspa->spa_root_vdev); 5250 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5251 5252 if (zio_injection_enabled) 5253 zio_handle_panic_injection(spa, FTAG, 2); 5254 5255 spa_async_resume(newspa); 5256 5257 /* finally, update the original pool's config */ 5258 txg = spa_vdev_config_enter(spa); 5259 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5260 error = dmu_tx_assign(tx, TXG_WAIT); 5261 if (error != 0) 5262 dmu_tx_abort(tx); 5263 for (c = 0; c < children; c++) { 5264 if (vml[c] != NULL) { 5265 vdev_split(vml[c]); 5266 if (error == 0) 5267 spa_history_log_internal(spa, "detach", tx, 5268 "vdev=%s", vml[c]->vdev_path); 5269 vdev_free(vml[c]); 5270 } 5271 } 5272 vdev_config_dirty(spa->spa_root_vdev); 5273 spa->spa_config_splitting = NULL; 5274 nvlist_free(nvl); 5275 if (error == 0) 5276 dmu_tx_commit(tx); 5277 (void) spa_vdev_exit(spa, NULL, txg, 0); 5278 5279 if (zio_injection_enabled) 5280 zio_handle_panic_injection(spa, FTAG, 3); 5281 5282 /* split is complete; log a history record */ 5283 spa_history_log_internal(newspa, "split", NULL, 5284 "from pool %s", spa_name(spa)); 5285 5286 kmem_free(vml, children * sizeof (vdev_t *)); 5287 5288 /* if we're not going to mount the filesystems in userland, export */ 5289 if (exp) 5290 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5291 B_FALSE, B_FALSE); 5292 5293 return (error); 5294 5295out: 5296 spa_unload(newspa); 5297 spa_deactivate(newspa); 5298 spa_remove(newspa); 5299 5300 txg = spa_vdev_config_enter(spa); 5301 5302 /* re-online all offlined disks */ 5303 
for (c = 0; c < children; c++) { 5304 if (vml[c] != NULL) 5305 vml[c]->vdev_offline = B_FALSE; 5306 } 5307 vdev_reopen(spa->spa_root_vdev); 5308 5309 nvlist_free(spa->spa_config_splitting); 5310 spa->spa_config_splitting = NULL; 5311 (void) spa_vdev_exit(spa, NULL, txg, error); 5312 5313 kmem_free(vml, children * sizeof (vdev_t *)); 5314 return (error); 5315} 5316 5317static nvlist_t * 5318spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5319{ 5320 for (int i = 0; i < count; i++) { 5321 uint64_t guid; 5322 5323 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5324 &guid) == 0); 5325 5326 if (guid == target_guid) 5327 return (nvpp[i]); 5328 } 5329 5330 return (NULL); 5331} 5332 5333static void 5334spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5335 nvlist_t *dev_to_remove) 5336{ 5337 nvlist_t **newdev = NULL; 5338 5339 if (count > 1) 5340 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5341 5342 for (int i = 0, j = 0; i < count; i++) { 5343 if (dev[i] == dev_to_remove) 5344 continue; 5345 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5346 } 5347 5348 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5349 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5350 5351 for (int i = 0; i < count - 1; i++) 5352 nvlist_free(newdev[i]); 5353 5354 if (count > 1) 5355 kmem_free(newdev, (count - 1) * sizeof (void *)); 5356} 5357 5358/* 5359 * Evacuate the device. 5360 */ 5361static int 5362spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5363{ 5364 uint64_t txg; 5365 int error = 0; 5366 5367 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5368 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5369 ASSERT(vd == vd->vdev_top); 5370 5371 /* 5372 * Evacuate the device. We don't hold the config lock as writer 5373 * since we need to do I/O but we do keep the 5374 * spa_namespace_lock held. Once this completes the device 5375 * should no longer have any blocks allocated on it. 5376 */ 5377 if (vd->vdev_islog) { 5378 if (vd->vdev_stat.vs_alloc != 0) 5379 error = spa_offline_log(spa); 5380 } else { 5381 error = SET_ERROR(ENOTSUP); 5382 } 5383 5384 if (error) 5385 return (error); 5386 5387 /* 5388 * The evacuation succeeded. Remove any remaining MOS metadata 5389 * associated with this vdev, and wait for these changes to sync. 5390 */ 5391 ASSERT0(vd->vdev_stat.vs_alloc); 5392 txg = spa_vdev_config_enter(spa); 5393 vd->vdev_removing = B_TRUE; 5394 vdev_dirty(vd, 0, NULL, txg); 5395 vdev_config_dirty(vd); 5396 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5397 5398 return (0); 5399} 5400 5401/* 5402 * Complete the removal by cleaning up the namespace. 5403 */ 5404static void 5405spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5406{ 5407 vdev_t *rvd = spa->spa_root_vdev; 5408 uint64_t id = vd->vdev_id; 5409 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5410 5411 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5412 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5413 ASSERT(vd == vd->vdev_top); 5414 5415 /* 5416 * Only remove any devices which are empty. 
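 * A non-zero vs_alloc here means the evacuation did not fully complete,
 * so the vdev is left in the tree rather than risk orphaning any blocks
 * still allocated on it.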
5417 */ 5418 if (vd->vdev_stat.vs_alloc != 0) 5419 return; 5420 5421 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5422 5423 if (list_link_active(&vd->vdev_state_dirty_node)) 5424 vdev_state_clean(vd); 5425 if (list_link_active(&vd->vdev_config_dirty_node)) 5426 vdev_config_clean(vd); 5427 5428 vdev_free(vd); 5429 5430 if (last_vdev) { 5431 vdev_compact_children(rvd); 5432 } else { 5433 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5434 vdev_add_child(rvd, vd); 5435 } 5436 vdev_config_dirty(rvd); 5437 5438 /* 5439 * Reassess the health of our root vdev. 5440 */ 5441 vdev_reopen(rvd); 5442} 5443 5444/* 5445 * Remove a device from the pool - 5446 * 5447 * Removing a device from the vdev namespace requires several steps 5448 * and can take a significant amount of time. As a result we use 5449 * the spa_vdev_config_[enter/exit] functions which allow us to 5450 * grab and release the spa_config_lock while still holding the namespace 5451 * lock. During each step the configuration is synced out. 5452 * 5453 * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5454 * devices. 5455 */ 5456int 5457spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5458{ 5459 vdev_t *vd; 5460 metaslab_group_t *mg; 5461 nvlist_t **spares, **l2cache, *nv; 5462 uint64_t txg = 0; 5463 uint_t nspares, nl2cache; 5464 int error = 0; 5465 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5466 5467 ASSERT(spa_writeable(spa)); 5468 5469 if (!locked) 5470 txg = spa_vdev_enter(spa); 5471 5472 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5473 5474 if (spa->spa_spares.sav_vdevs != NULL && 5475 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5476 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5477 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5478 /* 5479 * Only remove the hot spare if it's not currently in use 5480 * in this pool. 5481 */ 5482 if (vd == NULL || unspare) { 5483 spa_vdev_remove_aux(spa->spa_spares.sav_config, 5484 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5485 spa_load_spares(spa); 5486 spa->spa_spares.sav_sync = B_TRUE; 5487 } else { 5488 error = SET_ERROR(EBUSY); 5489 } 5490 } else if (spa->spa_l2cache.sav_vdevs != NULL && 5491 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5492 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5493 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5494 /* 5495 * Cache devices can always be removed. 5496 */ 5497 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5498 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5499 spa_load_l2cache(spa); 5500 spa->spa_l2cache.sav_sync = B_TRUE; 5501 } else if (vd != NULL && vd->vdev_islog) { 5502 ASSERT(!locked); 5503 ASSERT(vd == vd->vdev_top); 5504 5505 /* 5506 * XXX - Once we have bp-rewrite this should 5507 * become the common case. 5508 */ 5509 5510 mg = vd->vdev_mg; 5511 5512 /* 5513 * Stop allocating from this vdev. 5514 */ 5515 metaslab_group_passivate(mg); 5516 5517 /* 5518 * Wait for the youngest allocations and frees to sync, 5519 * and then wait for the deferral of those frees to finish. 5520 */ 5521 spa_vdev_config_exit(spa, NULL, 5522 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5523 5524 /* 5525 * Attempt to evacuate the vdev. 5526 */ 5527 error = spa_vdev_remove_evacuate(spa, vd); 5528 5529 txg = spa_vdev_config_enter(spa); 5530 5531 /* 5532 * If we couldn't evacuate the vdev, unwind. 
5533 */ 5534 if (error) { 5535 metaslab_group_activate(mg); 5536 return (spa_vdev_exit(spa, NULL, txg, error)); 5537 } 5538 5539 /* 5540 * Clean up the vdev namespace. 5541 */ 5542 spa_vdev_remove_from_namespace(spa, vd); 5543 5544 } else if (vd != NULL) { 5545 /* 5546 * Normal vdevs cannot be removed (yet). 5547 */ 5548 error = SET_ERROR(ENOTSUP); 5549 } else { 5550 /* 5551 * There is no vdev of any kind with the specified guid. 5552 */ 5553 error = SET_ERROR(ENOENT); 5554 } 5555 5556 if (!locked) 5557 return (spa_vdev_exit(spa, NULL, txg, error)); 5558 5559 return (error); 5560} 5561 5562/* 5563 * Find any device that's done replacing, or a vdev marked 'unspare' that's 5564 * currently spared, so we can detach it. 5565 */ 5566static vdev_t * 5567spa_vdev_resilver_done_hunt(vdev_t *vd) 5568{ 5569 vdev_t *newvd, *oldvd; 5570 5571 for (int c = 0; c < vd->vdev_children; c++) { 5572 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5573 if (oldvd != NULL) 5574 return (oldvd); 5575 } 5576 5577 /* 5578 * Check for a completed replacement. We always consider the first 5579 * vdev in the list to be the oldest vdev, and the last one to be 5580 * the newest (see spa_vdev_attach() for how that works). In 5581 * the case where the newest vdev is faulted, we will not automatically 5582 * remove it after a resilver completes. This is OK as it will require 5583 * user intervention to determine which disk the admin wishes to keep. 5584 */ 5585 if (vd->vdev_ops == &vdev_replacing_ops) { 5586 ASSERT(vd->vdev_children > 1); 5587 5588 newvd = vd->vdev_child[vd->vdev_children - 1]; 5589 oldvd = vd->vdev_child[0]; 5590 5591 if (vdev_dtl_empty(newvd, DTL_MISSING) && 5592 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5593 !vdev_dtl_required(oldvd)) 5594 return (oldvd); 5595 } 5596 5597 /* 5598 * Check for a completed resilver with the 'unspare' flag set. 5599 */ 5600 if (vd->vdev_ops == &vdev_spare_ops) { 5601 vdev_t *first = vd->vdev_child[0]; 5602 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5603 5604 if (last->vdev_unspare) { 5605 oldvd = first; 5606 newvd = last; 5607 } else if (first->vdev_unspare) { 5608 oldvd = last; 5609 newvd = first; 5610 } else { 5611 oldvd = NULL; 5612 } 5613 5614 if (oldvd != NULL && 5615 vdev_dtl_empty(newvd, DTL_MISSING) && 5616 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5617 !vdev_dtl_required(oldvd)) 5618 return (oldvd); 5619 5620 /* 5621 * If there are more than two spares attached to a disk, 5622 * and those spares are not required, then we want to 5623 * attempt to free them up now so that they can be used 5624 * by other pools. Once we're back down to a single 5625 * disk+spare, we stop removing them. 
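 * E.g. for spare(disk, spareA, spareB) with clean DTLs, spareA
 * (vdev_child[1]) is returned for detach; repeated passes of the hunt
 * peel spares off until only disk plus one spare remain.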
5626 */ 5627 if (vd->vdev_children > 2) { 5628 newvd = vd->vdev_child[1]; 5629 5630 if (newvd->vdev_isspare && last->vdev_isspare && 5631 vdev_dtl_empty(last, DTL_MISSING) && 5632 vdev_dtl_empty(last, DTL_OUTAGE) && 5633 !vdev_dtl_required(newvd)) 5634 return (newvd); 5635 } 5636 } 5637 5638 return (NULL); 5639} 5640 5641static void 5642spa_vdev_resilver_done(spa_t *spa) 5643{ 5644 vdev_t *vd, *pvd, *ppvd; 5645 uint64_t guid, sguid, pguid, ppguid; 5646 5647 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5648 5649 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5650 pvd = vd->vdev_parent; 5651 ppvd = pvd->vdev_parent; 5652 guid = vd->vdev_guid; 5653 pguid = pvd->vdev_guid; 5654 ppguid = ppvd->vdev_guid; 5655 sguid = 0; 5656 /* 5657 * If we have just finished replacing a hot spared device, then 5658 * we need to detach the parent's first child (the original hot 5659 * spare) as well. 5660 */ 5661 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5662 ppvd->vdev_children == 2) { 5663 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5664 sguid = ppvd->vdev_child[1]->vdev_guid; 5665 } 5666 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5667 5668 spa_config_exit(spa, SCL_ALL, FTAG); 5669 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5670 return; 5671 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5672 return; 5673 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5674 } 5675 5676 spa_config_exit(spa, SCL_ALL, FTAG); 5677} 5678 5679/* 5680 * Update the stored path or FRU for this vdev. 5681 */ 5682int 5683spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5684 boolean_t ispath) 5685{ 5686 vdev_t *vd; 5687 boolean_t sync = B_FALSE; 5688 5689 ASSERT(spa_writeable(spa)); 5690 5691 spa_vdev_state_enter(spa, SCL_ALL); 5692 5693 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 5694 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 5695 5696 if (!vd->vdev_ops->vdev_op_leaf) 5697 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 5698 5699 if (ispath) { 5700 if (strcmp(value, vd->vdev_path) != 0) { 5701 spa_strfree(vd->vdev_path); 5702 vd->vdev_path = spa_strdup(value); 5703 sync = B_TRUE; 5704 } 5705 } else { 5706 if (vd->vdev_fru == NULL) { 5707 vd->vdev_fru = spa_strdup(value); 5708 sync = B_TRUE; 5709 } else if (strcmp(value, vd->vdev_fru) != 0) { 5710 spa_strfree(vd->vdev_fru); 5711 vd->vdev_fru = spa_strdup(value); 5712 sync = B_TRUE; 5713 } 5714 } 5715 5716 return (spa_vdev_state_exit(spa, sync ? 
vd : NULL, 0)); 5717} 5718 5719int 5720spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 5721{ 5722 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 5723} 5724 5725int 5726spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 5727{ 5728 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 5729} 5730 5731/* 5732 * ========================================================================== 5733 * SPA Scanning 5734 * ========================================================================== 5735 */ 5736 5737int 5738spa_scan_stop(spa_t *spa) 5739{ 5740 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5741 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 5742 return (SET_ERROR(EBUSY)); 5743 return (dsl_scan_cancel(spa->spa_dsl_pool)); 5744} 5745 5746int 5747spa_scan(spa_t *spa, pool_scan_func_t func) 5748{ 5749 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5750 5751 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 5752 return (SET_ERROR(ENOTSUP)); 5753 5754 /* 5755 * If a resilver was requested, but there is no DTL on a 5756 * writeable leaf device, we have nothing to do. 5757 */ 5758 if (func == POOL_SCAN_RESILVER && 5759 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5760 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 5761 return (0); 5762 } 5763 5764 return (dsl_scan(spa->spa_dsl_pool, func)); 5765} 5766 5767/* 5768 * ========================================================================== 5769 * SPA async task processing 5770 * ========================================================================== 5771 */ 5772 5773static void 5774spa_async_remove(spa_t *spa, vdev_t *vd) 5775{ 5776 if (vd->vdev_remove_wanted) { 5777 vd->vdev_remove_wanted = B_FALSE; 5778 vd->vdev_delayed_close = B_FALSE; 5779 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 5780 5781 /* 5782 * We want to clear the stats, but we don't want to do a full 5783 * vdev_clear() as that will cause us to throw away 5784 * degraded/faulted state as well as attempt to reopen the 5785 * device, all of which is a waste. 
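 * Instead, just zero the per-vdev read/write/checksum error counters by
 * hand below.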
5786 */ 5787 vd->vdev_stat.vs_read_errors = 0; 5788 vd->vdev_stat.vs_write_errors = 0; 5789 vd->vdev_stat.vs_checksum_errors = 0; 5790 5791 vdev_state_dirty(vd->vdev_top); 5792 } 5793 5794 for (int c = 0; c < vd->vdev_children; c++) 5795 spa_async_remove(spa, vd->vdev_child[c]); 5796} 5797 5798static void 5799spa_async_probe(spa_t *spa, vdev_t *vd) 5800{ 5801 if (vd->vdev_probe_wanted) { 5802 vd->vdev_probe_wanted = B_FALSE; 5803 vdev_reopen(vd); /* vdev_open() does the actual probe */ 5804 } 5805 5806 for (int c = 0; c < vd->vdev_children; c++) 5807 spa_async_probe(spa, vd->vdev_child[c]); 5808} 5809 5810static void 5811spa_async_autoexpand(spa_t *spa, vdev_t *vd) 5812{ 5813 sysevent_id_t eid; 5814 nvlist_t *attr; 5815 char *physpath; 5816 5817 if (!spa->spa_autoexpand) 5818 return; 5819 5820 for (int c = 0; c < vd->vdev_children; c++) { 5821 vdev_t *cvd = vd->vdev_child[c]; 5822 spa_async_autoexpand(spa, cvd); 5823 } 5824 5825 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 5826 return; 5827 5828 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5829 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 5830 5831 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5832 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 5833 5834 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 5835 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP); 5836 5837 nvlist_free(attr); 5838 kmem_free(physpath, MAXPATHLEN); 5839} 5840 5841static void 5842spa_async_thread(void *arg) 5843{ 5844 spa_t *spa = arg; 5845 int tasks; 5846 5847 ASSERT(spa->spa_sync_on); 5848 5849 mutex_enter(&spa->spa_async_lock); 5850 tasks = spa->spa_async_tasks; 5851 spa->spa_async_tasks &= SPA_ASYNC_REMOVE; 5852 mutex_exit(&spa->spa_async_lock); 5853 5854 /* 5855 * See if the config needs to be updated. 5856 */ 5857 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 5858 uint64_t old_space, new_space; 5859 5860 mutex_enter(&spa_namespace_lock); 5861 old_space = metaslab_class_get_space(spa_normal_class(spa)); 5862 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5863 new_space = metaslab_class_get_space(spa_normal_class(spa)); 5864 mutex_exit(&spa_namespace_lock); 5865 5866 /* 5867 * If the pool grew as a result of the config update, 5868 * then log an internal history event. 5869 */ 5870 if (new_space != old_space) { 5871 spa_history_log_internal(spa, "vdev online", NULL, 5872 "pool '%s' size: %llu(+%llu)", 5873 spa_name(spa), new_space, new_space - old_space); 5874 } 5875 } 5876 5877 if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) { 5878 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 5879 spa_async_autoexpand(spa, spa->spa_root_vdev); 5880 spa_config_exit(spa, SCL_CONFIG, FTAG); 5881 } 5882 5883 /* 5884 * See if any devices need to be probed. 5885 */ 5886 if (tasks & SPA_ASYNC_PROBE) { 5887 spa_vdev_state_enter(spa, SCL_NONE); 5888 spa_async_probe(spa, spa->spa_root_vdev); 5889 (void) spa_vdev_state_exit(spa, NULL, 0); 5890 } 5891 5892 /* 5893 * If any devices are done replacing, detach them. 5894 */ 5895 if (tasks & SPA_ASYNC_RESILVER_DONE) 5896 spa_vdev_resilver_done(spa); 5897 5898 /* 5899 * Kick off a resilver. 5900 */ 5901 if (tasks & SPA_ASYNC_RESILVER) 5902 dsl_resilver_restart(spa->spa_dsl_pool, 0); 5903 5904 /* 5905 * Let the world know that we're done. 
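 * Clearing spa_async_thread under spa_async_lock and broadcasting
 * spa_async_cv is what lets spa_async_suspend() observe that this
 * thread has exited.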
*/ 5907	mutex_enter(&spa->spa_async_lock); 5908	spa->spa_async_thread = NULL; 5909	cv_broadcast(&spa->spa_async_cv); 5910	mutex_exit(&spa->spa_async_lock); 5911	thread_exit(); 5912} 5913 5914static void 5915spa_async_thread_vd(void *arg) 5916{ 5917	spa_t *spa = arg; 5918	int tasks; 5919 5920	ASSERT(spa->spa_sync_on); 5921 5922	mutex_enter(&spa->spa_async_lock); 5923	tasks = spa->spa_async_tasks; 5924retry: 5925	spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE; 5926	mutex_exit(&spa->spa_async_lock); 5927 5928	/* 5929	 * See if any devices need to be marked REMOVED. 5930	 */ 5931	if (tasks & SPA_ASYNC_REMOVE) { 5932		spa_vdev_state_enter(spa, SCL_NONE); 5933		spa_async_remove(spa, spa->spa_root_vdev); 5934		for (int i = 0; i < spa->spa_l2cache.sav_count; i++) 5935			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]); 5936		for (int i = 0; i < spa->spa_spares.sav_count; i++) 5937			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]); 5938		(void) spa_vdev_state_exit(spa, NULL, 0); 5939	} 5940 5941	/* 5942	 * Let the world know that we're done. 5943	 */ 5944	mutex_enter(&spa->spa_async_lock); 5945	tasks = spa->spa_async_tasks; 5946	if ((tasks & SPA_ASYNC_REMOVE) != 0) 5947		goto retry; 5948	spa->spa_async_thread_vd = NULL; 5949	cv_broadcast(&spa->spa_async_cv); 5950	mutex_exit(&spa->spa_async_lock); 5951	thread_exit(); 5952} 5953 5954void 5955spa_async_suspend(spa_t *spa) 5956{ 5957	mutex_enter(&spa->spa_async_lock); 5958	spa->spa_async_suspended++; 5959	while (spa->spa_async_thread != NULL || 5960	    spa->spa_async_thread_vd != NULL) 5961		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock); 5962	mutex_exit(&spa->spa_async_lock); 5963} 5964 5965void 5966spa_async_resume(spa_t *spa) 5967{ 5968	mutex_enter(&spa->spa_async_lock); 5969	ASSERT(spa->spa_async_suspended != 0); 5970	spa->spa_async_suspended--; 5971	mutex_exit(&spa->spa_async_lock); 5972} 5973 5974static boolean_t 5975spa_async_tasks_pending(spa_t *spa) 5976{ 5977	uint_t non_config_tasks; 5978	uint_t config_task; 5979	boolean_t config_task_suspended; 5980 5981	non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE | 5982	    SPA_ASYNC_REMOVE); 5983	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE; 5984	if (spa->spa_ccw_fail_time == 0) { 5985		config_task_suspended = B_FALSE; 5986	} else { 5987		config_task_suspended = 5988		    (gethrtime() - spa->spa_ccw_fail_time) < 5989		    (zfs_ccw_retry_interval * NANOSEC); 5990	} 5991 5992	return (non_config_tasks || (config_task && !config_task_suspended)); 5993} 5994 5995static void 5996spa_async_dispatch(spa_t *spa) 5997{ 5998	mutex_enter(&spa->spa_async_lock); 5999	if (spa_async_tasks_pending(spa) && 6000	    !spa->spa_async_suspended && 6001	    spa->spa_async_thread == NULL && 6002	    rootdir != NULL) 6003		spa->spa_async_thread = thread_create(NULL, 0, 6004		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri); 6005	mutex_exit(&spa->spa_async_lock); 6006} 6007 6008static void 6009spa_async_dispatch_vd(spa_t *spa) 6010{ 6011	mutex_enter(&spa->spa_async_lock); 6012	if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 && 6013	    !spa->spa_async_suspended && 6014	    spa->spa_async_thread_vd == NULL && 6015	    rootdir != NULL) 6016		spa->spa_async_thread_vd = thread_create(NULL, 0, 6017		    spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri); 6018	mutex_exit(&spa->spa_async_lock); 6019} 6020 6021void 6022spa_async_request(spa_t *spa, int task) 6023{ 6024	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task); 6025	mutex_enter(&spa->spa_async_lock); 6026	spa->spa_async_tasks |= task; 6027	mutex_exit(&spa->spa_async_lock);
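	/* Only the vdev async thread is kicked here; the main async thread is dispatched separately (see spa_async_dispatch(), called at the end of spa_sync()). */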
6028 spa_async_dispatch_vd(spa); 6029} 6030 6031/* 6032 * ========================================================================== 6033 * SPA syncing routines 6034 * ========================================================================== 6035 */ 6036 6037static int 6038bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6039{ 6040 bpobj_t *bpo = arg; 6041 bpobj_enqueue(bpo, bp, tx); 6042 return (0); 6043} 6044 6045static int 6046spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx) 6047{ 6048 zio_t *zio = arg; 6049 6050 zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp, 6051 BP_GET_PSIZE(bp), zio->io_flags)); 6052 return (0); 6053} 6054 6055static void 6056spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx) 6057{ 6058 char *packed = NULL; 6059 size_t bufsize; 6060 size_t nvsize = 0; 6061 dmu_buf_t *db; 6062 6063 VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0); 6064 6065 /* 6066 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration 6067 * information. This avoids the dbuf_will_dirty() path and 6068 * saves us a pre-read to get data we don't actually care about. 6069 */ 6070 bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE); 6071 packed = kmem_alloc(bufsize, KM_SLEEP); 6072 6073 VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR, 6074 KM_SLEEP) == 0); 6075 bzero(packed + nvsize, bufsize - nvsize); 6076 6077 dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx); 6078 6079 kmem_free(packed, bufsize); 6080 6081 VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db)); 6082 dmu_buf_will_dirty(db, tx); 6083 *(uint64_t *)db->db_data = nvsize; 6084 dmu_buf_rele(db, FTAG); 6085} 6086 6087static void 6088spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx, 6089 const char *config, const char *entry) 6090{ 6091 nvlist_t *nvroot; 6092 nvlist_t **list; 6093 int i; 6094 6095 if (!sav->sav_sync) 6096 return; 6097 6098 /* 6099 * Update the MOS nvlist describing the list of available devices. 6100 * spa_validate_aux() will have already made sure this nvlist is 6101 * valid and the vdevs are labeled appropriately. 
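 * If this is the first sync of this device list, the object is allocated below and linked into the pool directory.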
6102 */ 6103 if (sav->sav_object == 0) { 6104 sav->sav_object = dmu_object_alloc(spa->spa_meta_objset, 6105 DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE, 6106 sizeof (uint64_t), tx); 6107 VERIFY(zap_update(spa->spa_meta_objset, 6108 DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1, 6109 &sav->sav_object, tx) == 0); 6110 } 6111 6112 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 6113 if (sav->sav_count == 0) { 6114 VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0); 6115 } else { 6116 list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP); 6117 for (i = 0; i < sav->sav_count; i++) 6118 list[i] = vdev_config_generate(spa, sav->sav_vdevs[i], 6119 B_FALSE, VDEV_CONFIG_L2CACHE); 6120 VERIFY(nvlist_add_nvlist_array(nvroot, config, list, 6121 sav->sav_count) == 0); 6122 for (i = 0; i < sav->sav_count; i++) 6123 nvlist_free(list[i]); 6124 kmem_free(list, sav->sav_count * sizeof (void *)); 6125 } 6126 6127 spa_sync_nvlist(spa, sav->sav_object, nvroot, tx); 6128 nvlist_free(nvroot); 6129 6130 sav->sav_sync = B_FALSE; 6131} 6132 6133static void 6134spa_sync_config_object(spa_t *spa, dmu_tx_t *tx) 6135{ 6136 nvlist_t *config; 6137 6138 if (list_is_empty(&spa->spa_config_dirty_list)) 6139 return; 6140 6141 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6142 6143 config = spa_config_generate(spa, spa->spa_root_vdev, 6144 dmu_tx_get_txg(tx), B_FALSE); 6145 6146 /* 6147 * If we're upgrading the spa version then make sure that 6148 * the config object gets updated with the correct version. 6149 */ 6150 if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version) 6151 fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 6152 spa->spa_uberblock.ub_version); 6153 6154 spa_config_exit(spa, SCL_STATE, FTAG); 6155 6156 if (spa->spa_config_syncing) 6157 nvlist_free(spa->spa_config_syncing); 6158 spa->spa_config_syncing = config; 6159 6160 spa_sync_nvlist(spa, spa->spa_config_object, config, tx); 6161} 6162 6163static void 6164spa_sync_version(void *arg, dmu_tx_t *tx) 6165{ 6166 uint64_t *versionp = arg; 6167 uint64_t version = *versionp; 6168 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 6169 6170 /* 6171 * Setting the version is special cased when first creating the pool. 6172 */ 6173 ASSERT(tx->tx_txg != TXG_INITIAL); 6174 6175 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 6176 ASSERT(version >= spa_version(spa)); 6177 6178 spa->spa_uberblock.ub_version = version; 6179 vdev_config_dirty(spa->spa_root_vdev); 6180 spa_history_log_internal(spa, "set", tx, "version=%lld", version); 6181} 6182 6183/* 6184 * Set zpool properties. 6185 */ 6186static void 6187spa_sync_props(void *arg, dmu_tx_t *tx) 6188{ 6189 nvlist_t *nvp = arg; 6190 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 6191 objset_t *mos = spa->spa_meta_objset; 6192 nvpair_t *elem = NULL; 6193 6194 mutex_enter(&spa->spa_props_lock); 6195 6196 while ((elem = nvlist_next_nvpair(nvp, elem))) { 6197 uint64_t intval; 6198 char *strval, *fname; 6199 zpool_prop_t prop; 6200 const char *propname; 6201 zprop_type_t proptype; 6202 zfeature_info_t *feature; 6203 6204 switch (prop = zpool_name_to_prop(nvpair_name(elem))) { 6205 case ZPROP_INVAL: 6206 /* 6207 * We checked this earlier in spa_prop_validate(). 
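 * The only non-property names that can reach this point are feature@<name> entries, which are enabled below.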
*/ 6209			ASSERT(zpool_prop_feature(nvpair_name(elem))); 6210 6211			fname = strchr(nvpair_name(elem), '@') + 1; 6212			VERIFY3U(0, ==, zfeature_lookup_name(fname, &feature)); 6213 6214			spa_feature_enable(spa, feature, tx); 6215			spa_history_log_internal(spa, "set", tx, 6216			    "%s=enabled", nvpair_name(elem)); 6217			break; 6218 6219		case ZPOOL_PROP_VERSION: 6220			VERIFY(nvpair_value_uint64(elem, &intval) == 0); 6221			/* 6222			 * The version is synced separately before other 6223			 * properties and should be correct by now. 6224			 */ 6225			ASSERT3U(spa_version(spa), >=, intval); 6226			break; 6227 6228		case ZPOOL_PROP_ALTROOT: 6229			/* 6230			 * 'altroot' is a non-persistent property. It should 6231			 * have been set temporarily at creation or import time. 6232			 */ 6233			ASSERT(spa->spa_root != NULL); 6234			break; 6235 6236		case ZPOOL_PROP_READONLY: 6237		case ZPOOL_PROP_CACHEFILE: 6238			/* 6239			 * 'readonly' and 'cachefile' are also non-persistent 6240			 * properties. 6241			 */ 6242			break; 6243		case ZPOOL_PROP_COMMENT: 6244			VERIFY(nvpair_value_string(elem, &strval) == 0); 6245			if (spa->spa_comment != NULL) 6246				spa_strfree(spa->spa_comment); 6247			spa->spa_comment = spa_strdup(strval); 6248			/* 6249			 * We need to dirty the configuration on all the vdevs 6250			 * so that their labels get updated. It's unnecessary 6251			 * to do this for pool creation since the vdev's 6252			 * configuration has already been dirtied. 6253			 */ 6254			if (tx->tx_txg != TXG_INITIAL) 6255				vdev_config_dirty(spa->spa_root_vdev); 6256			spa_history_log_internal(spa, "set", tx, 6257			    "%s=%s", nvpair_name(elem), strval); 6258			break; 6259		default: 6260			/* 6261			 * Set pool property values in the poolprops mos object. 6262			 */ 6263			if (spa->spa_pool_props_object == 0) { 6264				spa->spa_pool_props_object = 6265				    zap_create_link(mos, DMU_OT_POOL_PROPS, 6266				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS, 6267				    tx); 6268			} 6269 6270			/* normalize the property name */ 6271			propname = zpool_prop_to_name(prop); 6272			proptype = zpool_prop_get_type(prop); 6273 6274			if (nvpair_type(elem) == DATA_TYPE_STRING) { 6275				ASSERT(proptype == PROP_TYPE_STRING); 6276				VERIFY(nvpair_value_string(elem, &strval) == 0); 6277				VERIFY(zap_update(mos, 6278				    spa->spa_pool_props_object, propname, 6279				    1, strlen(strval) + 1, strval, tx) == 0); 6280				spa_history_log_internal(spa, "set", tx, 6281				    "%s=%s", nvpair_name(elem), strval); 6282			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) { 6283				VERIFY(nvpair_value_uint64(elem, &intval) == 0); 6284 6285				if (proptype == PROP_TYPE_INDEX) { 6286					const char *unused; 6287					VERIFY(zpool_prop_index_to_string( 6288					    prop, intval, &unused) == 0); 6289				} 6290				VERIFY(zap_update(mos, 6291				    spa->spa_pool_props_object, propname, 6292				    8, 1, &intval, tx) == 0); 6293				spa_history_log_internal(spa, "set", tx, 6294				    "%s=%lld", nvpair_name(elem), intval); 6295			} else { 6296				ASSERT(0); /* not allowed */ 6297			} 6298 6299			switch (prop) { 6300			case ZPOOL_PROP_DELEGATION: 6301				spa->spa_delegation = intval; 6302				break; 6303			case ZPOOL_PROP_BOOTFS: 6304				spa->spa_bootfs = intval; 6305				break; 6306			case ZPOOL_PROP_FAILUREMODE: 6307				spa->spa_failmode = intval; 6308				break; 6309			case ZPOOL_PROP_AUTOEXPAND: 6310				spa->spa_autoexpand = intval; 6311				if (tx->tx_txg != TXG_INITIAL) 6312					spa_async_request(spa, 6313					    SPA_ASYNC_AUTOEXPAND); 6314				break; 6315			case ZPOOL_PROP_DEDUPDITTO: 6316				spa->spa_dedup_ditto = intval; 6317				break; 6318			default: 6319				break; 6320			} 6321		} 6322 6323	} 6324 6325	mutex_exit(&spa->spa_props_lock); 6326} 6327 6328/* 6329 * Perform one-time upgrade on-disk changes.
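(Each upgrade below runs exactly once, when the previously synced spa_ubsync version is still below a given feature version and the in-flight spa_uberblock version has reached it.)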
spa_version() does not 6330 * reflect the new version this txg, so there must be no changes this 6331 * txg to anything that the upgrade code depends on after it executes. 6332 * Therefore this must be called after dsl_pool_sync() does the sync 6333 * tasks. 6334 */ 6335static void 6336spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx) 6337{ 6338 dsl_pool_t *dp = spa->spa_dsl_pool; 6339 6340 ASSERT(spa->spa_sync_pass == 1); 6341 6342 rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG); 6343 6344 if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN && 6345 spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) { 6346 dsl_pool_create_origin(dp, tx); 6347 6348 /* Keeping the origin open increases spa_minref */ 6349 spa->spa_minref += 3; 6350 } 6351 6352 if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES && 6353 spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) { 6354 dsl_pool_upgrade_clones(dp, tx); 6355 } 6356 6357 if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES && 6358 spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) { 6359 dsl_pool_upgrade_dir_clones(dp, tx); 6360 6361 /* Keeping the freedir open increases spa_minref */ 6362 spa->spa_minref += 3; 6363 } 6364 6365 if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES && 6366 spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) { 6367 spa_feature_create_zap_objects(spa, tx); 6368 } 6369 rrw_exit(&dp->dp_config_rwlock, FTAG); 6370} 6371 6372/* 6373 * Sync the specified transaction group. New blocks may be dirtied as 6374 * part of the process, so we iterate until it converges. 6375 */ 6376void 6377spa_sync(spa_t *spa, uint64_t txg) 6378{ 6379 dsl_pool_t *dp = spa->spa_dsl_pool; 6380 objset_t *mos = spa->spa_meta_objset; 6381 bpobj_t *defer_bpo = &spa->spa_deferred_bpobj; 6382 bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK]; 6383 vdev_t *rvd = spa->spa_root_vdev; 6384 vdev_t *vd; 6385 dmu_tx_t *tx; 6386 int error; 6387 6388 VERIFY(spa_writeable(spa)); 6389 6390 /* 6391 * Lock out configuration changes. 6392 */ 6393 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 6394 6395 spa->spa_syncing_txg = txg; 6396 spa->spa_sync_pass = 0; 6397 6398 /* 6399 * If there are any pending vdev state changes, convert them 6400 * into config changes that go out with this transaction group. 6401 */ 6402 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6403 while (list_head(&spa->spa_state_dirty_list) != NULL) { 6404 /* 6405 * We need the write lock here because, for aux vdevs, 6406 * calling vdev_config_dirty() modifies sav_config. 6407 * This is ugly and will become unnecessary when we 6408 * eliminate the aux vdev wart by integrating all vdevs 6409 * into the root vdev tree. 
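 * Because the locks cannot be upgraded in place, both SCL_CONFIG and SCL_STATE are dropped and reacquired as writer below.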
*/ 6411		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6412		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER); 6413		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) { 6414			vdev_state_clean(vd); 6415			vdev_config_dirty(vd); 6416		} 6417		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG); 6418		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER); 6419	} 6420	spa_config_exit(spa, SCL_STATE, FTAG); 6421 6422	tx = dmu_tx_create_assigned(dp, txg); 6423 6424	spa->spa_sync_starttime = gethrtime(); 6425#ifdef illumos 6426	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, 6427	    spa->spa_sync_starttime + spa->spa_deadman_synctime)); 6428#else	/* FreeBSD */ 6429#ifdef _KERNEL 6430	callout_reset(&spa->spa_deadman_cycid, 6431	    hz * spa->spa_deadman_synctime / NANOSEC, spa_deadman, spa); 6432#endif 6433#endif 6434 6435	/* 6436	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg, 6437	 * set spa_deflate if we have no raid-z vdevs. 6438	 */ 6439	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE && 6440	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) { 6441		int i; 6442 6443		for (i = 0; i < rvd->vdev_children; i++) { 6444			vd = rvd->vdev_child[i]; 6445			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE) 6446				break; 6447		} 6448		if (i == rvd->vdev_children) { 6449			spa->spa_deflate = TRUE; 6450			VERIFY(0 == zap_add(spa->spa_meta_objset, 6451			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 6452			    sizeof (uint64_t), 1, &spa->spa_deflate, tx)); 6453		} 6454	} 6455 6456	/* 6457	 * If anything has changed in this txg, or if someone is waiting 6458	 * for this txg to sync (e.g., spa_vdev_remove()), push the 6459	 * deferred frees from the previous txg. If not, leave them 6460	 * alone so that we don't generate work on an otherwise idle 6461	 * system. 6462	 */ 6463	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) || 6464	    !txg_list_empty(&dp->dp_dirty_dirs, txg) || 6465	    !txg_list_empty(&dp->dp_sync_tasks, txg) || 6466	    ((dsl_scan_active(dp->dp_scan) || 6467	    txg_sync_waiting(dp)) && !spa_shutting_down(spa))) { 6468		zio_t *zio = zio_root(spa, NULL, NULL, 0); 6469		VERIFY3U(bpobj_iterate(defer_bpo, 6470		    spa_free_sync_cb, zio, tx), ==, 0); 6471		VERIFY0(zio_wait(zio)); 6472	} 6473 6474	/* 6475	 * Iterate to convergence. 6476	 */ 6477	do { 6478		int pass = ++spa->spa_sync_pass; 6479 6480		spa_sync_config_object(spa, tx); 6481		spa_sync_aux_dev(spa, &spa->spa_spares, tx, 6482		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES); 6483		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx, 6484		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE); 6485		spa_errlog_sync(spa, txg); 6486		dsl_pool_sync(dp, txg); 6487 6488		if (pass < zfs_sync_pass_deferred_free) { 6489			zio_t *zio = zio_root(spa, NULL, NULL, 0); 6490			bplist_iterate(free_bpl, spa_free_sync_cb, 6491			    zio, tx); 6492			VERIFY(zio_wait(zio) == 0); 6493		} else { 6494			bplist_iterate(free_bpl, bpobj_enqueue_cb, 6495			    defer_bpo, tx); 6496		} 6497 6498		ddt_sync(spa, txg); 6499		dsl_scan_sync(dp, tx); 6500 6501		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)) != NULL) 6502			vdev_sync(vd, txg); 6503 6504		if (pass == 1) 6505			spa_sync_upgrades(spa, tx); 6506 6507	} while (dmu_objset_is_dirty(mos, txg)); 6508 6509	/* 6510	 * Rewrite the vdev configuration (which includes the uberblock) 6511	 * to commit the transaction group. 6512	 * 6513	 * If there are no dirty vdevs, we sync the uberblock to a few 6514	 * random top-level vdevs that are known to be visible in the 6515	 * config cache (see spa_vdev_add() for a complete description).
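 * (Top-level vdevs with no metaslab array and log vdevs are skipped as candidates.)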
* If there *are* dirty vdevs, sync the uberblock to all vdevs. 6517	 */ 6518	for (;;) { 6519		/* 6520		 * We hold SCL_STATE to prevent vdev open/close/etc. 6521		 * while we're attempting to write the vdev labels. 6522		 */ 6523		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER); 6524 6525		if (list_is_empty(&spa->spa_config_dirty_list)) { 6526			vdev_t *svd[SPA_DVAS_PER_BP]; 6527			int svdcount = 0; 6528			int children = rvd->vdev_children; 6529			int c0 = spa_get_random(children); 6530 6531			for (int c = 0; c < children; c++) { 6532				vd = rvd->vdev_child[(c0 + c) % children]; 6533				if (vd->vdev_ms_array == 0 || vd->vdev_islog) 6534					continue; 6535				svd[svdcount++] = vd; 6536				if (svdcount == SPA_DVAS_PER_BP) 6537					break; 6538			} 6539			error = vdev_config_sync(svd, svdcount, txg, B_FALSE); 6540			if (error != 0) 6541				error = vdev_config_sync(svd, svdcount, txg, 6542				    B_TRUE); 6543		} else { 6544			error = vdev_config_sync(rvd->vdev_child, 6545			    rvd->vdev_children, txg, B_FALSE); 6546			if (error != 0) 6547				error = vdev_config_sync(rvd->vdev_child, 6548				    rvd->vdev_children, txg, B_TRUE); 6549		} 6550 6551		if (error == 0) 6552			spa->spa_last_synced_guid = rvd->vdev_guid; 6553 6554		spa_config_exit(spa, SCL_STATE, FTAG); 6555 6556		if (error == 0) 6557			break; 6558		zio_suspend(spa, NULL); 6559		zio_resume_wait(spa); 6560	} 6561	dmu_tx_commit(tx); 6562 6563#ifdef illumos 6564	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY)); 6565#else	/* FreeBSD */ 6566#ifdef _KERNEL 6567	callout_drain(&spa->spa_deadman_cycid); 6568#endif 6569#endif 6570 6571	/* 6572	 * Clear the dirty config list. 6573	 */ 6574	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL) 6575		vdev_config_clean(vd); 6576 6577	/* 6578	 * Now that the new config has synced transactionally, 6579	 * let it become visible to the config cache. 6580	 */ 6581	if (spa->spa_config_syncing != NULL) { 6582		spa_config_set(spa, spa->spa_config_syncing); 6583		spa->spa_config_txg = txg; 6584		spa->spa_config_syncing = NULL; 6585	} 6586 6587	spa->spa_ubsync = spa->spa_uberblock; 6588 6589	dsl_pool_sync_done(dp, txg); 6590 6591	/* 6592	 * Update usable space statistics. 6593	 */ 6594	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))) != NULL) 6595		vdev_sync_done(vd, txg); 6596 6597	spa_update_dspace(spa); 6598 6599	/* 6600	 * It had better be the case that we didn't dirty anything 6601	 * since vdev_config_sync(). 6602	 */ 6603	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg)); 6604	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg)); 6605	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg)); 6606 6607	spa->spa_sync_pass = 0; 6608 6609	spa_config_exit(spa, SCL_CONFIG, FTAG); 6610 6611	spa_handle_ignored_writes(spa); 6612 6613	/* 6614	 * If any async tasks have been requested, kick them off. 6615	 */ 6616	spa_async_dispatch(spa); 6617	spa_async_dispatch_vd(spa); 6618} 6619 6620/* 6621 * Sync all pools. We don't want to hold the namespace lock across these 6622 * operations, so we take a reference on the spa_t and drop the lock during the 6623 * sync.
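 * The reference taken via spa_open_ref() keeps the spa_t from being removed while the namespace lock is dropped.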
6624 */ 6625void 6626spa_sync_allpools(void) 6627{ 6628 spa_t *spa = NULL; 6629 mutex_enter(&spa_namespace_lock); 6630 while ((spa = spa_next(spa)) != NULL) { 6631 if (spa_state(spa) != POOL_STATE_ACTIVE || 6632 !spa_writeable(spa) || spa_suspended(spa)) 6633 continue; 6634 spa_open_ref(spa, FTAG); 6635 mutex_exit(&spa_namespace_lock); 6636 txg_wait_synced(spa_get_dsl(spa), 0); 6637 mutex_enter(&spa_namespace_lock); 6638 spa_close(spa, FTAG); 6639 } 6640 mutex_exit(&spa_namespace_lock); 6641} 6642 6643/* 6644 * ========================================================================== 6645 * Miscellaneous routines 6646 * ========================================================================== 6647 */ 6648 6649/* 6650 * Remove all pools in the system. 6651 */ 6652void 6653spa_evict_all(void) 6654{ 6655 spa_t *spa; 6656 6657 /* 6658 * Remove all cached state. All pools should be closed now, 6659 * so every spa in the AVL tree should be unreferenced. 6660 */ 6661 mutex_enter(&spa_namespace_lock); 6662 while ((spa = spa_next(NULL)) != NULL) { 6663 /* 6664 * Stop async tasks. The async thread may need to detach 6665 * a device that's been replaced, which requires grabbing 6666 * spa_namespace_lock, so we must drop it here. 6667 */ 6668 spa_open_ref(spa, FTAG); 6669 mutex_exit(&spa_namespace_lock); 6670 spa_async_suspend(spa); 6671 mutex_enter(&spa_namespace_lock); 6672 spa_close(spa, FTAG); 6673 6674 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 6675 spa_unload(spa); 6676 spa_deactivate(spa); 6677 } 6678 spa_remove(spa); 6679 } 6680 mutex_exit(&spa_namespace_lock); 6681} 6682 6683vdev_t * 6684spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux) 6685{ 6686 vdev_t *vd; 6687 int i; 6688 6689 if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL) 6690 return (vd); 6691 6692 if (aux) { 6693 for (i = 0; i < spa->spa_l2cache.sav_count; i++) { 6694 vd = spa->spa_l2cache.sav_vdevs[i]; 6695 if (vd->vdev_guid == guid) 6696 return (vd); 6697 } 6698 6699 for (i = 0; i < spa->spa_spares.sav_count; i++) { 6700 vd = spa->spa_spares.sav_vdevs[i]; 6701 if (vd->vdev_guid == guid) 6702 return (vd); 6703 } 6704 } 6705 6706 return (NULL); 6707} 6708 6709void 6710spa_upgrade(spa_t *spa, uint64_t version) 6711{ 6712 ASSERT(spa_writeable(spa)); 6713 6714 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 6715 6716 /* 6717 * This should only be called for a non-faulted pool, and since a 6718 * future version would result in an unopenable pool, this shouldn't be 6719 * possible. 6720 */ 6721 ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version)); 6722 ASSERT(version >= spa->spa_uberblock.ub_version); 6723 6724 spa->spa_uberblock.ub_version = version; 6725 vdev_config_dirty(spa->spa_root_vdev); 6726 6727 spa_config_exit(spa, SCL_ALL, FTAG); 6728 6729 txg_wait_synced(spa_get_dsl(spa), 0); 6730} 6731 6732boolean_t 6733spa_has_spare(spa_t *spa, uint64_t guid) 6734{ 6735 int i; 6736 uint64_t spareguid; 6737 spa_aux_vdev_t *sav = &spa->spa_spares; 6738 6739 for (i = 0; i < sav->sav_count; i++) 6740 if (sav->sav_vdevs[i]->vdev_guid == guid) 6741 return (B_TRUE); 6742 6743 for (i = 0; i < sav->sav_npending; i++) { 6744 if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID, 6745 &spareguid) == 0 && spareguid == guid) 6746 return (B_TRUE); 6747 } 6748 6749 return (B_FALSE); 6750} 6751 6752/* 6753 * Check if a pool has an active shared spare device. 
* Note: reference count of an active spare is 2, as a spare and as a replacement 6755 */ 6756static boolean_t 6757spa_has_active_shared_spare(spa_t *spa) 6758{ 6759	int i, refcnt; 6760	uint64_t pool; 6761	spa_aux_vdev_t *sav = &spa->spa_spares; 6762 6763	for (i = 0; i < sav->sav_count; i++) { 6764		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool, 6765		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) && 6766		    refcnt > 2) 6767			return (B_TRUE); 6768	} 6769 6770	return (B_FALSE); 6771} 6772 6773/* 6774 * Post a sysevent corresponding to the given event. The 'name' must be one of 6775 * the event definitions in sys/sysevent/eventdefs.h. The payload will be 6776 * filled in from the spa and (optionally) the vdev. This doesn't do anything 6777 * in the userland libzpool, as we don't want consumers to misinterpret ztest 6778 * or zdb as real changes. 6779 */ 6780void 6781spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) 6782{ 6783#ifdef _KERNEL 6784	sysevent_t *ev; 6785	sysevent_attr_list_t *attr = NULL; 6786	sysevent_value_t value; 6787	sysevent_id_t eid; 6788 6789	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs", 6790	    SE_SLEEP); 6791 6792	value.value_type = SE_DATA_TYPE_STRING; 6793	value.value.sv_string = spa_name(spa); 6794	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0) 6795		goto done; 6796 6797	value.value_type = SE_DATA_TYPE_UINT64; 6798	value.value.sv_uint64 = spa_guid(spa); 6799	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0) 6800		goto done; 6801 6802	if (vd) { 6803		value.value_type = SE_DATA_TYPE_UINT64; 6804		value.value.sv_uint64 = vd->vdev_guid; 6805		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value, 6806		    SE_SLEEP) != 0) 6807			goto done; 6808 6809		if (vd->vdev_path) { 6810			value.value_type = SE_DATA_TYPE_STRING; 6811			value.value.sv_string = vd->vdev_path; 6812			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH, 6813			    &value, SE_SLEEP) != 0) 6814				goto done; 6815		} 6816	} 6817 6818	if (sysevent_attach_attributes(ev, attr) != 0) 6819		goto done; 6820	attr = NULL; 6821 6822	(void) log_sysevent(ev, SE_SLEEP, &eid); 6823 6824done: 6825	if (attr) 6826		sysevent_free_attr(attr); 6827	sysevent_free(ev); 6828#endif 6829} 6830
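/*
 * Illustrative sketch, not part of the original source: how a caller
 * might drive the async-task machinery defined above.  The function
 * name spa_example_quiesce is hypothetical; the routines it calls
 * (spa_async_request(), spa_async_suspend(), spa_async_resume()) are
 * the real ones in this file.
 */
#if 0	/* example only -- never compiled */
static void
spa_example_quiesce(spa_t *spa)
{
	/*
	 * Request a device probe: this ORs the task bit into
	 * spa->spa_async_tasks under spa_async_lock; the work itself
	 * runs later in spa_async_thread().
	 */
	spa_async_request(spa, SPA_ASYNC_PROBE);

	/*
	 * Block new async threads and wait for running ones to exit;
	 * spa_async_suspend() bumps spa_async_suspended and waits on
	 * spa_async_cv until the async threads are gone.
	 */
	spa_async_suspend(spa);

	/* ... work that must not race with async tasks ... */

	/* Drop the suspend count so tasks may be dispatched again. */
	spa_async_resume(spa);
}
#endif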