spa.c revision 269219
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, 2014, Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

/*
 * SPA: Storage Pool Allocator
 *
 * This file contains all the routines used when modifying on-disk SPA state.
 * This includes opening, importing, destroying, exporting a pool, and syncing a
 * pool.
 */

#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zvol.h>
#include <sys/trim_map.h>

#ifdef _KERNEL
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/zone.h>
#endif	/* _KERNEL */

#include "zfs_prop.h"
#include "zfs_comutil.h"

/* Check hostid on import? */
static int check_hostid = 1;

SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid);
SYSCTL_INT(_vfs_zfs, OID_AUTO, check_hostid, CTLFLAG_RW, &check_hostid, 0,
    "Check hostid on import?");
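/*
 * For example, on FreeBSD the check can be disabled at runtime with
 * "sysctl vfs.zfs.check_hostid=0", or at boot via the loader tunable of
 * the same name registered above.
 */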
/*
 * The interval, in seconds, at which failed configuration cache file writes
 * should be retried.
 */
static int zfs_ccw_retry_interval = 300;

typedef enum zti_modes {
	ZTI_MODE_FIXED,		/* value is # of threads (min 1) */
	ZTI_MODE_BATCH,		/* cpu-intensive; value is ignored */
	ZTI_MODE_NULL,		/* don't create a taskq */
	ZTI_NMODES
} zti_modes_t;

#define	ZTI_P(n, q)	{ ZTI_MODE_FIXED, (n), (q) }
#define	ZTI_BATCH	{ ZTI_MODE_BATCH, 0, 1 }
#define	ZTI_NULL	{ ZTI_MODE_NULL, 0, 0 }

#define	ZTI_N(n)	ZTI_P(n, 1)
#define	ZTI_ONE		ZTI_N(1)

typedef struct zio_taskq_info {
	zti_modes_t zti_mode;
	uint_t zti_value;
	uint_t zti_count;
} zio_taskq_info_t;

static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
	"issue", "issue_high", "intr", "intr_high"
};

/*
 * This table defines the taskq settings for each ZFS I/O type.  When
 * initializing a pool, we use this table to create an appropriately sized
 * taskq.  Some operations are low volume and therefore have a small, static
 * number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
 * macros.  Other operations process a large amount of data; the ZTI_BATCH
 * macro causes us to create a taskq oriented for throughput.  Some operations
 * are so high frequency and short-lived that the taskq itself can become a
 * point of lock contention.  The ZTI_P(#, #) macro indicates that we need an
 * additional degree of parallelism specified by the number of threads per
 * taskq and the number of taskqs; when dispatching an event in this case, the
 * particular taskq is chosen at random.
 *
 * The different taskq priorities are to handle the different contexts (issue
 * and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
 * need to be handled with minimum delay.
 */
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
	/* ISSUE	ISSUE_HIGH	INTR		INTR_HIGH */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* NULL */
	{ ZTI_N(8),	ZTI_NULL,	ZTI_BATCH,	ZTI_NULL }, /* READ */
	{ ZTI_BATCH,	ZTI_N(5),	ZTI_N(8),	ZTI_N(5) }, /* WRITE */
	{ ZTI_P(12, 8),	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* FREE */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* CLAIM */
	{ ZTI_ONE,	ZTI_NULL,	ZTI_ONE,	ZTI_NULL }, /* IOCTL */
};
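/*
 * For example, the FREE row above uses ZTI_P(12, 8) for its issue taskqs:
 * eight taskqs of twelve threads each are created, and every dispatch
 * picks one of the eight at random (see spa_taskq_dispatch_ent() below).
 */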
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
    spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
    char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);

uint_t		zio_taskq_batch_pct = 75;	/* 1 thread per cpu in pset */
#ifdef PSRSET_BIND
id_t		zio_taskq_psrset_bind = PS_NONE;
#endif
#ifdef SYSDC
boolean_t	zio_taskq_sysdc = B_TRUE;	/* use SDC scheduling class */
#endif
uint_t		zio_taskq_basedc = 80;		/* base duty cycle */

boolean_t	spa_create_process = B_TRUE;	/* no process ==> no sysdc */
extern int	zfs_sync_pass_deferred_free;

#ifndef illumos
extern void spa_deadman(void *arg);
#endif

/*
 * This (illegal) pool name is used when temporarily importing a spa_t in order
 * to get the vdev stats associated with the imported devices.
 */
#define	TRYIMPORT_NAME	"$import"

/*
 * ==========================================================================
 * SPA properties routines
 * ==========================================================================
 */

/*
 * Add a (source=src, propname=propval) list to an nvlist.
 */
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
    uint64_t intval, zprop_source_t src)
{
	const char *propname = zpool_prop_to_name(prop);
	nvlist_t *propval;

	VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);

	if (strval != NULL)
		VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
	else
		VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);

	VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
	nvlist_free(propval);
}
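/*
 * A typical call supplies either a string or a numeric value, e.g.
 *
 *	spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
 *	spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
 *
 * (both taken from spa_prop_get_config() below); strval == NULL selects
 * the uint64 form.
 */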
/*
 * Get property values from the spa configuration.
 */
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
	vdev_t *rvd = spa->spa_root_vdev;
	dsl_pool_t *pool = spa->spa_dsl_pool;
	uint64_t size;
	uint64_t alloc;
	uint64_t space;
	uint64_t cap, version;
	zprop_source_t src = ZPROP_SRC_NONE;
	spa_config_dirent_t *dp;

	ASSERT(MUTEX_HELD(&spa->spa_props_lock));

	if (rvd != NULL) {
		alloc = metaslab_class_get_alloc(spa_normal_class(spa));
		size = metaslab_class_get_space(spa_normal_class(spa));
		spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
		spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
		    size - alloc, src);

		space = 0;
		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			space += tvd->vdev_max_asize - tvd->vdev_asize;
		}
		spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL, space,
		    src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
		    (spa_mode(spa) == FREAD), src);

		cap = (size == 0) ? 0 : (alloc * 100 / size);
		spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
		    ddt_get_pool_dedup_ratio(spa), src);

		spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
		    rvd->vdev_state, src);

		version = spa_version(spa);
		if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION))
			src = ZPROP_SRC_DEFAULT;
		else
			src = ZPROP_SRC_LOCAL;
		spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL, version, src);
	}

	if (pool != NULL) {
		/*
		 * The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
		 * when opening pools before this version, freedir will be
		 * NULL.
		 */
		if (pool->dp_free_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
			    pool->dp_free_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
			    NULL, 0, src);
		}

		if (pool->dp_leak_dir != NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
			    pool->dp_leak_dir->dd_phys->dd_used_bytes, src);
		} else {
			spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
			    NULL, 0, src);
		}
	}

	spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);

	if (spa->spa_comment != NULL) {
		spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
		    0, ZPROP_SRC_LOCAL);
	}

	if (spa->spa_root != NULL)
		spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
		    0, ZPROP_SRC_LOCAL);

	if ((dp = list_head(&spa->spa_config_list)) != NULL) {
		if (dp->scd_path == NULL) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    "none", 0, ZPROP_SRC_LOCAL);
		} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
			spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
			    dp->scd_path, 0, ZPROP_SRC_LOCAL);
		}
	}
}

/*
 * Get zpool property values.
 */
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
	objset_t *mos = spa->spa_meta_objset;
	zap_cursor_t zc;
	zap_attribute_t za;
	int err;

	VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa->spa_props_lock);

	/*
	 * Get properties from the spa config.
	 */
	spa_prop_get_config(spa, nvp);

	/* If no pool property object, no more prop to get. */
	if (mos == NULL || spa->spa_pool_props_object == 0) {
		mutex_exit(&spa->spa_props_lock);
		return (0);
	}

	/*
	 * Get properties from the MOS pool property object.
	 */
	for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
	    (err = zap_cursor_retrieve(&zc, &za)) == 0;
	    zap_cursor_advance(&zc)) {
		uint64_t intval = 0;
		char *strval = NULL;
		zprop_source_t src = ZPROP_SRC_DEFAULT;
		zpool_prop_t prop;

		if ((prop = zpool_name_to_prop(za.za_name)) == ZPROP_INVAL)
			continue;

		switch (za.za_integer_length) {
		case 8:
			/* integer property */
			if (za.za_first_integer !=
			    zpool_prop_default_numeric(prop))
				src = ZPROP_SRC_LOCAL;

			if (prop == ZPOOL_PROP_BOOTFS) {
				dsl_pool_t *dp;
				dsl_dataset_t *ds = NULL;

				dp = spa_get_dsl(spa);
				dsl_pool_config_enter(dp, FTAG);
				if (err = dsl_dataset_hold_obj(dp,
				    za.za_first_integer, FTAG, &ds)) {
					dsl_pool_config_exit(dp, FTAG);
					break;
				}

				strval = kmem_alloc(
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1,
				    KM_SLEEP);
				dsl_dataset_name(ds, strval);
				dsl_dataset_rele(ds, FTAG);
				dsl_pool_config_exit(dp, FTAG);
			} else {
				strval = NULL;
				intval = za.za_first_integer;
			}

			spa_prop_add_list(*nvp, prop, strval, intval, src);

			if (strval != NULL)
				kmem_free(strval,
				    MAXNAMELEN + strlen(MOS_DIR_NAME) + 1);

			break;

		case 1:
			/* string property */
			strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
			err = zap_lookup(mos, spa->spa_pool_props_object,
			    za.za_name, 1, za.za_num_integers, strval);
			if (err) {
				kmem_free(strval, za.za_num_integers);
				break;
			}
			spa_prop_add_list(*nvp, prop, strval, 0, src);
			kmem_free(strval, za.za_num_integers);
			break;

		default:
			break;
		}
	}
	zap_cursor_fini(&zc);
	mutex_exit(&spa->spa_props_lock);
out:
	if (err && err != ENOENT) {
		nvlist_free(*nvp);
		*nvp = NULL;
		return (err);
	}

	return (0);
}

/*
 * Validate the given pool properties nvlist and modify the list
 * for the property values to be set.
 */
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
	nvpair_t *elem;
	int error = 0, reset_bootfs = 0;
	uint64_t objnum = 0;
	boolean_t has_feature = B_FALSE;

	elem = NULL;
	while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
		uint64_t intval;
		char *strval, *slash, *check, *fname;
		const char *propname = nvpair_name(elem);
		zpool_prop_t prop = zpool_name_to_prop(propname);

		switch (prop) {
		case ZPROP_INVAL:
			if (!zpool_prop_feature(propname)) {
				error = SET_ERROR(EINVAL);
				break;
			}

			/*
			 * Sanitize the input.
			 */
			if (nvpair_type(elem) != DATA_TYPE_UINT64) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (nvpair_value_uint64(elem, &intval) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			if (intval != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			fname = strchr(propname, '@') + 1;
			if (zfeature_lookup_name(fname, NULL) != 0) {
				error = SET_ERROR(EINVAL);
				break;
			}

			has_feature = B_TRUE;
			break;

		case ZPOOL_PROP_VERSION:
			error = nvpair_value_uint64(elem, &intval);
			if (!error &&
			    (intval < spa_version(spa) ||
			    intval > SPA_VERSION_BEFORE_FEATURES ||
			    has_feature))
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_DELEGATION:
		case ZPOOL_PROP_AUTOREPLACE:
		case ZPOOL_PROP_LISTSNAPS:
		case ZPOOL_PROP_AUTOEXPAND:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && intval > 1)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_BOOTFS:
			/*
			 * If the pool version is less than SPA_VERSION_BOOTFS,
			 * or the pool is still being created (version == 0),
			 * the bootfs property cannot be set.
			 */
			if (spa_version(spa) < SPA_VERSION_BOOTFS) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			/*
			 * Make sure the vdev config is bootable
			 */
			if (!vdev_is_bootable(spa->spa_root_vdev)) {
				error = SET_ERROR(ENOTSUP);
				break;
			}

			reset_bootfs = 1;

			error = nvpair_value_string(elem, &strval);

			if (!error) {
				objset_t *os;
				uint64_t compress;

				if (strval == NULL || strval[0] == '\0') {
					objnum = zpool_prop_default_numeric(
					    ZPOOL_PROP_BOOTFS);
					break;
				}

				if (error = dmu_objset_hold(strval, FTAG, &os))
					break;

				/* Must be ZPL and not gzip compressed. */

				if (dmu_objset_type(os) != DMU_OST_ZFS) {
					error = SET_ERROR(ENOTSUP);
				} else if ((error =
				    dsl_prop_get_int_ds(dmu_objset_ds(os),
				    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
				    &compress)) == 0 &&
				    !BOOTFS_COMPRESS_VALID(compress)) {
					error = SET_ERROR(ENOTSUP);
				} else {
					objnum = dmu_objset_id(os);
				}
				dmu_objset_rele(os, FTAG);
			}
			break;

		case ZPOOL_PROP_FAILUREMODE:
			error = nvpair_value_uint64(elem, &intval);
			if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
			    intval > ZIO_FAILURE_MODE_PANIC))
				error = SET_ERROR(EINVAL);

			/*
			 * This is a special case which only occurs when
			 * the pool has completely failed.  This allows
			 * the user to change the in-core failmode property
			 * without syncing it out to disk (I/Os might
			 * currently be blocked).  We do this by returning
			 * EIO to the caller (spa_prop_set) to trick it
			 * into thinking we encountered a property validation
			 * error.
			 */
			if (!error && spa_suspended(spa)) {
				spa->spa_failmode = intval;
				error = SET_ERROR(EIO);
			}
			break;

		case ZPOOL_PROP_CACHEFILE:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;

			if (strval[0] == '\0')
				break;

			if (strcmp(strval, "none") == 0)
				break;

			if (strval[0] != '/') {
				error = SET_ERROR(EINVAL);
				break;
			}

			slash = strrchr(strval, '/');
			ASSERT(slash != NULL);

			if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
			    strcmp(slash, "/..") == 0)
				error = SET_ERROR(EINVAL);
			break;

		case ZPOOL_PROP_COMMENT:
			if ((error = nvpair_value_string(elem, &strval)) != 0)
				break;
			for (check = strval; *check != '\0'; check++) {
				/*
				 * The kernel doesn't have an easy isprint()
				 * check.  For this kernel check, we merely
				 * check ASCII apart from DEL.  Fix this if
				 * there is an easy-to-use kernel isprint().
				 */
				if (*check >= 0x7f) {
					error = SET_ERROR(EINVAL);
					break;
				}
			}
			if (strlen(strval) > ZPROP_MAX_COMMENT)
				error = E2BIG;
			break;

		case ZPOOL_PROP_DEDUPDITTO:
			if (spa_version(spa) < SPA_VERSION_DEDUP)
				error = SET_ERROR(ENOTSUP);
			else
				error = nvpair_value_uint64(elem, &intval);
			if (error == 0 &&
			    intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
				error = SET_ERROR(EINVAL);
			break;
		}

		if (error)
			break;
	}

	if (!error && reset_bootfs) {
		error = nvlist_remove(props,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);

		if (!error) {
			error = nvlist_add_uint64(props,
			    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
		}
	}

	return (error);
}

void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
	char *cachefile;
	spa_config_dirent_t *dp;

	if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
	    &cachefile) != 0)
		return;

	dp = kmem_alloc(sizeof (spa_config_dirent_t),
	    KM_SLEEP);

	if (cachefile[0] == '\0')
		dp->scd_path = spa_strdup(spa_config_path);
	else if (strcmp(cachefile, "none") == 0)
		dp->scd_path = NULL;
	else
		dp->scd_path = spa_strdup(cachefile);

	list_insert_head(&spa->spa_config_list, dp);
	if (need_sync)
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}

int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
	int error;
	nvpair_t *elem = NULL;
	boolean_t need_sync = B_FALSE;

	if ((error = spa_prop_validate(spa, nvp)) != 0)
		return (error);

	while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
		zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));

		if (prop == ZPOOL_PROP_CACHEFILE ||
		    prop == ZPOOL_PROP_ALTROOT ||
		    prop == ZPOOL_PROP_READONLY)
			continue;

		if (prop == ZPOOL_PROP_VERSION || prop == ZPROP_INVAL) {
			uint64_t ver;

			if (prop == ZPOOL_PROP_VERSION) {
				VERIFY(nvpair_value_uint64(elem, &ver) == 0);
			} else {
				ASSERT(zpool_prop_feature(nvpair_name(elem)));
				ver = SPA_VERSION_FEATURES;
				need_sync = B_TRUE;
			}

			/* Save time if the version is already set. */
			if (ver == spa_version(spa))
				continue;

			/*
			 * In addition to the pool directory object, we might
			 * create the pool properties object, the features for
			 * read object, the features for write object, or the
			 * feature descriptions object.
			 */
			error = dsl_sync_task(spa->spa_name, NULL,
			    spa_sync_version, &ver,
			    6, ZFS_SPACE_CHECK_RESERVED);
			if (error)
				return (error);
			continue;
		}

		need_sync = B_TRUE;
		break;
	}

	if (need_sync) {
		return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
		    nvp, 6, ZFS_SPACE_CHECK_RESERVED));
	}

	return (0);
}

/*
 * If the bootfs property value is dsobj, clear it.
 */
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
	if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
		VERIFY(zap_remove(spa->spa_meta_objset,
		    spa->spa_pool_props_object,
		    zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
		spa->spa_bootfs = 0;
	}
}

/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t vdev_state;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	vdev_state = rvd->vdev_state;
	spa_config_exit(spa, SCL_STATE, FTAG);

	if (vdev_state != VDEV_STATE_HEALTHY)
		return (SET_ERROR(ENXIO));

	ASSERT3U(spa_guid(spa), !=, *newguid);

	return (0);
}

static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t *newguid = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	uint64_t oldguid;
	vdev_t *rvd = spa->spa_root_vdev;

	oldguid = spa_guid(spa);

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	rvd->vdev_guid = *newguid;
	rvd->vdev_guid_sum += (*newguid - oldguid);
	vdev_config_dirty(rvd);
	spa_config_exit(spa, SCL_STATE, FTAG);

	spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
	    oldguid, *newguid);
}
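/*
 * The check/sync pair above runs as a single dsl_sync_task from
 * spa_change_guid() below; from userland the operation is typically
 * reached via "zpool reguid <pool>".
 */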
/*
 * Change the GUID for the pool.  This is done so that we can later
 * re-import a pool built from a clone of our own vdevs.  We will modify
 * the root vdev's guid, our own pool guid, and then mark all of our
 * vdevs dirty.  Note that we must make sure that all our vdevs are
 * online when we do this, or else any vdevs that weren't present
 * would be orphaned from our pool.  We are also going to issue a
 * sysevent to update any watchers.
 */
int
spa_change_guid(spa_t *spa)
{
	int error;
	uint64_t guid;

	mutex_enter(&spa->spa_vdev_top_lock);
	mutex_enter(&spa_namespace_lock);
	guid = spa_generate_guid(NULL);

	error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
	    spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);

	if (error == 0) {
		spa_config_sync(spa, B_FALSE, B_TRUE);
		spa_event_notify(spa, NULL, ESC_ZFS_POOL_REGUID);
	}

	mutex_exit(&spa_namespace_lock);
	mutex_exit(&spa->spa_vdev_top_lock);

	return (error);
}

/*
 * ==========================================================================
 * SPA state manipulation (open/create/destroy/import/export)
 * ==========================================================================
 */

static int
spa_error_entry_compare(const void *a, const void *b)
{
	spa_error_entry_t *sa = (spa_error_entry_t *)a;
	spa_error_entry_t *sb = (spa_error_entry_t *)b;
	int ret;

	ret = bcmp(&sa->se_bookmark, &sb->se_bookmark,
	    sizeof (zbookmark_phys_t));

	if (ret < 0)
		return (-1);
	else if (ret > 0)
		return (1);
	else
		return (0);
}

/*
 * Utility function which retrieves copies of the current logs and
 * re-initializes them in the process.
 */
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
	ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));

	bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
	bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
	enum zti_modes mode = ztip->zti_mode;
	uint_t value = ztip->zti_value;
	uint_t count = ztip->zti_count;
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	char name[32];
	uint_t flags = 0;
	boolean_t batch = B_FALSE;

	if (mode == ZTI_MODE_NULL) {
		tqs->stqs_count = 0;
		tqs->stqs_taskq = NULL;
		return;
	}

	ASSERT3U(count, >, 0);

	tqs->stqs_count = count;
	tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);

	switch (mode) {
	case ZTI_MODE_FIXED:
		ASSERT3U(value, >=, 1);
		value = MAX(value, 1);
		break;

	case ZTI_MODE_BATCH:
		batch = B_TRUE;
		flags |= TASKQ_THREADS_CPU_PCT;
		value = zio_taskq_batch_pct;
		break;

	default:
		panic("unrecognized mode for %s_%s taskq (%u:%u) in "
		    "spa_activate()",
		    zio_type_name[t], zio_taskq_types[q], mode, value);
		break;
	}

	for (uint_t i = 0; i < count; i++) {
		taskq_t *tq;

		if (count > 1) {
			(void) snprintf(name, sizeof (name), "%s_%s_%u",
			    zio_type_name[t], zio_taskq_types[q], i);
		} else {
			(void) snprintf(name, sizeof (name), "%s_%s",
			    zio_type_name[t], zio_taskq_types[q]);
		}

#ifdef SYSDC
		if (zio_taskq_sysdc && spa->spa_proc != &p0) {
			if (batch)
				flags |= TASKQ_DC_BATCH;

			tq = taskq_create_sysdc(name, value, 50, INT_MAX,
			    spa->spa_proc, zio_taskq_basedc, flags);
		} else {
#endif
			pri_t pri = maxclsyspri;
			/*
			 * The write issue taskq can be extremely CPU
			 * intensive.  Run it at slightly lower priority
			 * than the other taskqs.
			 */
			if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
				pri--;

			tq = taskq_create_proc(name, value, pri, 50,
			    INT_MAX, spa->spa_proc, flags);
#ifdef SYSDC
		}
#endif

		tqs->stqs_taskq[i] = tq;
	}
}

static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];

	if (tqs->stqs_taskq == NULL) {
		ASSERT0(tqs->stqs_count);
		return;
	}

	for (uint_t i = 0; i < tqs->stqs_count; i++) {
		ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
		taskq_destroy(tqs->stqs_taskq[i]);
	}

	kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
	tqs->stqs_taskq = NULL;
}

/*
 * Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
 * Note that a type may have multiple discrete taskqs to avoid lock contention
 * on the taskq itself.  In that case we choose which taskq at random by using
 * the low bits of gethrtime().
 */
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
    task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
	spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
	taskq_t *tq;

	ASSERT3P(tqs->stqs_taskq, !=, NULL);
	ASSERT3U(tqs->stqs_count, !=, 0);

	if (tqs->stqs_count == 1) {
		tq = tqs->stqs_taskq[0];
	} else {
#ifdef _KERNEL
		tq = tqs->stqs_taskq[cpu_ticks() % tqs->stqs_count];
#else
		tq = tqs->stqs_taskq[gethrtime() % tqs->stqs_count];
#endif
	}

	taskq_dispatch_ent(tq, func, arg, flags, ent);
}
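/*
 * For example, the zio pipeline hands work to these queues with a call of
 * roughly this shape (a sketch of the caller in zio.c; exact flags vary):
 *
 *	spa_taskq_dispatch_ent(spa, ZIO_TYPE_WRITE, ZIO_TASKQ_ISSUE,
 *	    (task_func_t *)zio_execute, zio, 0, &zio->io_tqent);
 */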
static void
spa_create_zio_taskqs(spa_t *spa)
{
	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_init(spa, t, q);
		}
	}
}

#ifdef _KERNEL
#ifdef SPA_PROCESS
static void
spa_thread(void *arg)
{
	callb_cpr_t cprinfo;

	spa_t *spa = arg;
	user_t *pu = PTOU(curproc);

	CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
	    spa->spa_name);

	ASSERT(curproc != &p0);
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
	    "zpool-%s", spa->spa_name);
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

#ifdef PSRSET_BIND
	/* bind this thread to the requested psrset */
	if (zio_taskq_psrset_bind != PS_NONE) {
		pool_lock();
		mutex_enter(&cpu_lock);
		mutex_enter(&pidlock);
		mutex_enter(&curproc->p_lock);

		if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
		    0, NULL, NULL) == 0) {
			curthread->t_bind_pset = zio_taskq_psrset_bind;
		} else {
			cmn_err(CE_WARN,
			    "Couldn't bind process for zfs pool \"%s\" to "
			    "pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
		}

		mutex_exit(&curproc->p_lock);
		mutex_exit(&pidlock);
		mutex_exit(&cpu_lock);
		pool_unlock();
	}
#endif

#ifdef SYSDC
	if (zio_taskq_sysdc) {
		sysdc_thread_enter(curthread, 100, 0);
	}
#endif

	spa->spa_proc = curproc;
	spa->spa_did = curthread->t_did;

	spa_create_zio_taskqs(spa);

	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);

	spa->spa_proc_state = SPA_PROC_ACTIVE;
	cv_broadcast(&spa->spa_proc_cv);

	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	while (spa->spa_proc_state == SPA_PROC_ACTIVE)
		cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
	CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);

	ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
	spa->spa_proc_state = SPA_PROC_GONE;
	spa->spa_proc = &p0;
	cv_broadcast(&spa->spa_proc_cv);
	CALLB_CPR_EXIT(&cprinfo);	/* drops spa_proc_lock */

	mutex_enter(&curproc->p_lock);
	lwp_exit();
}
#endif	/* SPA_PROCESS */
#endif

/*
 * Activate an uninitialized pool.
 */
static void
spa_activate(spa_t *spa, int mode)
{
	ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

	spa->spa_state = POOL_STATE_ACTIVE;
	spa->spa_mode = mode;

	spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
	spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);

	/* Try to create a covering process */
	mutex_enter(&spa->spa_proc_lock);
	ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
	ASSERT(spa->spa_proc == &p0);
	spa->spa_did = 0;

#ifdef SPA_PROCESS
	/* Only create a process if we're going to be around a while. */
	if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
		if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
		    NULL, 0) == 0) {
			spa->spa_proc_state = SPA_PROC_CREATED;
			while (spa->spa_proc_state == SPA_PROC_CREATED) {
				cv_wait(&spa->spa_proc_cv,
				    &spa->spa_proc_lock);
			}
			ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
			ASSERT(spa->spa_proc != &p0);
			ASSERT(spa->spa_did != 0);
		} else {
#ifdef _KERNEL
			cmn_err(CE_WARN,
			    "Couldn't create process for zfs pool \"%s\"\n",
			    spa->spa_name);
#endif
		}
	}
#endif	/* SPA_PROCESS */
	mutex_exit(&spa->spa_proc_lock);

	/* If we didn't create a process, we need to create our taskqs. */
	ASSERT(spa->spa_proc == &p0);
	if (spa->spa_proc == &p0) {
		spa_create_zio_taskqs(spa);
	}

	/*
	 * Start TRIM thread.
	 */
	trim_thread_create(spa);

	list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_config_dirty_node));
	list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_state_dirty_node));

	txg_list_create(&spa->spa_vdev_txg_list,
	    offsetof(struct vdev, vdev_txg_node));

	avl_create(&spa->spa_errlist_scrub,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
	avl_create(&spa->spa_errlist_last,
	    spa_error_entry_compare, sizeof (spa_error_entry_t),
	    offsetof(spa_error_entry_t, se_avl));
}

/*
 * Opposite of spa_activate().
 */
static void
spa_deactivate(spa_t *spa)
{
	ASSERT(spa->spa_sync_on == B_FALSE);
	ASSERT(spa->spa_dsl_pool == NULL);
	ASSERT(spa->spa_root_vdev == NULL);
	ASSERT(spa->spa_async_zio_root == NULL);
	ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
	/*
	 * Stop TRIM thread in case spa_unload() wasn't called directly
	 * before spa_deactivate().
	 */
	trim_thread_destroy(spa);

	txg_list_destroy(&spa->spa_vdev_txg_list);

	list_destroy(&spa->spa_config_dirty_list);
	list_destroy(&spa->spa_state_dirty_list);

	for (int t = 0; t < ZIO_TYPES; t++) {
		for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
			spa_taskqs_fini(spa, t, q);
		}
	}

	metaslab_class_destroy(spa->spa_normal_class);
	spa->spa_normal_class = NULL;

	metaslab_class_destroy(spa->spa_log_class);
	spa->spa_log_class = NULL;

	/*
	 * If this was part of an import or the open otherwise failed, we may
	 * still have errors left in the queues.  Empty them just in case.
	 */
	spa_errlog_drain(spa);

	avl_destroy(&spa->spa_errlist_scrub);
	avl_destroy(&spa->spa_errlist_last);

	spa->spa_state = POOL_STATE_UNINITIALIZED;

	mutex_enter(&spa->spa_proc_lock);
	if (spa->spa_proc_state != SPA_PROC_NONE) {
		ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
		spa->spa_proc_state = SPA_PROC_DEACTIVATE;
		cv_broadcast(&spa->spa_proc_cv);
		while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
			ASSERT(spa->spa_proc != &p0);
			cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
		}
		ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
		spa->spa_proc_state = SPA_PROC_NONE;
	}
	ASSERT(spa->spa_proc == &p0);
	mutex_exit(&spa->spa_proc_lock);

#ifdef SPA_PROCESS
	/*
	 * We want to make sure spa_thread() has actually exited the ZFS
	 * module, so that the module can't be unloaded out from underneath
	 * it.
	 */
	if (spa->spa_did != 0) {
		thread_join(spa->spa_did);
		spa->spa_did = 0;
	}
#endif	/* SPA_PROCESS */
}

/*
 * Verify a pool configuration, and construct the vdev tree appropriately.  This
 * will create all the necessary vdevs in the appropriate layout, with each vdev
 * in the CLOSED state.  This will prep the pool before open/creation/import.
 * All vdev validation is done by the vdev_alloc() routine.
 */
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
    uint_t id, int atype)
{
	nvlist_t **child;
	uint_t children;
	int error;

	if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
		return (error);

	if ((*vdp)->vdev_ops->vdev_op_leaf)
		return (0);

	error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
	    &child, &children);

	if (error == ENOENT)
		return (0);

	if (error) {
		vdev_free(*vdp);
		*vdp = NULL;
		return (SET_ERROR(EINVAL));
	}

	for (int c = 0; c < children; c++) {
		vdev_t *vd;
		if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
		    atype)) != 0) {
			vdev_free(*vdp);
			*vdp = NULL;
			return (error);
		}
	}

	ASSERT(*vdp != NULL);

	return (0);
}
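/*
 * As an illustration, a two-way mirror arrives here as an nvlist tree of
 * roughly this shape (sketch):
 *
 *	type=root
 *	    children[0]: type=mirror
 *	        children[0]: type=disk, path=/dev/...
 *	        children[1]: type=disk, path=/dev/...
 *
 * spa_config_parse() walks it depth-first, allocating a vdev_t for each
 * node until it reaches the leaves.
 */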
/*
 * Opposite of spa_load().
 */
static void
spa_unload(spa_t *spa)
{
	int i;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop TRIM thread.
	 */
	trim_thread_destroy(spa);

	/*
	 * Stop async tasks.
	 */
	spa_async_suspend(spa);

	/*
	 * Stop syncing.
	 */
	if (spa->spa_sync_on) {
		txg_sync_stop(spa->spa_dsl_pool);
		spa->spa_sync_on = B_FALSE;
	}

	/*
	 * Wait for any outstanding async I/O to complete.
	 */
	if (spa->spa_async_zio_root != NULL) {
		(void) zio_wait(spa->spa_async_zio_root);
		spa->spa_async_zio_root = NULL;
	}

	bpobj_close(&spa->spa_deferred_bpobj);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * Close all vdevs.
	 */
	if (spa->spa_root_vdev)
		vdev_free(spa->spa_root_vdev);
	ASSERT(spa->spa_root_vdev == NULL);

	/*
	 * Close the dsl pool.
	 */
	if (spa->spa_dsl_pool) {
		dsl_pool_close(spa->spa_dsl_pool);
		spa->spa_dsl_pool = NULL;
		spa->spa_meta_objset = NULL;
	}

	ddt_unload(spa);

	/*
	 * Drop and purge level 2 cache
	 */
	spa_l2cache_drop(spa);

	for (i = 0; i < spa->spa_spares.sav_count; i++)
		vdev_free(spa->spa_spares.sav_vdevs[i]);
	if (spa->spa_spares.sav_vdevs) {
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));
		spa->spa_spares.sav_vdevs = NULL;
	}
	if (spa->spa_spares.sav_config) {
		nvlist_free(spa->spa_spares.sav_config);
		spa->spa_spares.sav_config = NULL;
	}
	spa->spa_spares.sav_count = 0;

	for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
		vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
		vdev_free(spa->spa_l2cache.sav_vdevs[i]);
	}
	if (spa->spa_l2cache.sav_vdevs) {
		kmem_free(spa->spa_l2cache.sav_vdevs,
		    spa->spa_l2cache.sav_count * sizeof (void *));
		spa->spa_l2cache.sav_vdevs = NULL;
	}
	if (spa->spa_l2cache.sav_config) {
		nvlist_free(spa->spa_l2cache.sav_config);
		spa->spa_l2cache.sav_config = NULL;
	}
	spa->spa_l2cache.sav_count = 0;

	spa->spa_async_suspended = 0;

	if (spa->spa_comment != NULL) {
		spa_strfree(spa->spa_comment);
		spa->spa_comment = NULL;
	}

	spa_config_exit(spa, SCL_ALL, FTAG);
}

/*
 * Load (or re-load) the current list of vdevs describing the active spares for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_spares.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 */
static void
spa_load_spares(spa_t *spa)
{
	nvlist_t **spares;
	uint_t nspares;
	int i;
	vdev_t *vd, *tvd;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/*
	 * First, close and free any existing spare vdevs.
	 */
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		vd = spa->spa_spares.sav_vdevs[i];

		/* Undo the call to spa_activate() below */
		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL && tvd->vdev_isspare)
			spa_spare_remove(tvd);
		vdev_close(vd);
		vdev_free(vd);
	}

	if (spa->spa_spares.sav_vdevs)
		kmem_free(spa->spa_spares.sav_vdevs,
		    spa->spa_spares.sav_count * sizeof (void *));

	if (spa->spa_spares.sav_config == NULL)
		nspares = 0;
	else
		VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
		    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);

	spa->spa_spares.sav_count = (int)nspares;
	spa->spa_spares.sav_vdevs = NULL;

	if (nspares == 0)
		return;
	/*
	 * Construct the array of vdevs, opening them to get status in the
	 * process.  For each spare, there are potentially two different
	 * vdev_t structures associated with it: one in the list of spares
	 * (used only for basic validation purposes) and one in the active
	 * vdev configuration (if it's spared in).  During this phase we open
	 * and validate each vdev on the spare list.  If the vdev also exists
	 * in the active configuration, then we also mark this vdev as an
	 * active spare.
	 */
	spa->spa_spares.sav_vdevs = kmem_alloc(nspares * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++) {
		VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
		    VDEV_ALLOC_SPARE) == 0);
		ASSERT(vd != NULL);

		spa->spa_spares.sav_vdevs[i] = vd;

		if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
		    B_FALSE)) != NULL) {
			if (!tvd->vdev_isspare)
				spa_spare_add(tvd);

			/*
			 * We only mark the spare active if we were successfully
			 * able to load the vdev.  Otherwise, importing a pool
			 * with a bad active spare would result in strange
			 * behavior, because multiple pools would think the
			 * spare is actively in use.
			 *
			 * There is a vulnerability here to an equally bizarre
			 * circumstance, where a dead active spare is later
			 * brought back to life (onlined or otherwise).  Given
			 * the rarity of this scenario, and the extra complexity
			 * it adds, we ignore the possibility.
			 */
			if (!vdev_is_dead(tvd))
				spa_spare_activate(tvd);
		}

		vd->vdev_top = vd;
		vd->vdev_aux = &spa->spa_spares;

		if (vdev_open(vd) != 0)
			continue;

		if (vdev_validate_aux(vd) == 0)
			spa_spare_add(vd);
	}

	/*
	 * Recompute the stashed list of spares, with status information
	 * this time.
	 */
	VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
	    KM_SLEEP);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		spares[i] = vdev_config_generate(spa,
		    spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
	VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
	for (i = 0; i < spa->spa_spares.sav_count; i++)
		nvlist_free(spares[i]);
	kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}

/*
 * Load (or re-load) the current list of vdevs describing the active l2cache for
 * this pool.  When this is called, we have some form of basic information in
 * 'spa_l2cache.sav_config'.  We parse this into vdevs, try to open them, and
 * then re-generate a more complete list including status information.
 * Devices which are already active have their details maintained, and are
 * not re-opened.
 */
static void
spa_load_l2cache(spa_t *spa)
{
	nvlist_t **l2cache;
	uint_t nl2cache;
	int i, j, oldnvdevs;
	uint64_t guid;
	vdev_t *vd, **oldvdevs, **newvdevs;
	spa_aux_vdev_t *sav = &spa->spa_l2cache;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	if (sav->sav_config != NULL) {
		VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
		    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
		newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
	} else {
		nl2cache = 0;
		newvdevs = NULL;
	}

	oldvdevs = sav->sav_vdevs;
	oldnvdevs = sav->sav_count;
	sav->sav_vdevs = NULL;
	sav->sav_count = 0;

	/*
	 * Process new nvlist of vdevs.
	 */
	for (i = 0; i < nl2cache; i++) {
		VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
		    &guid) == 0);

		newvdevs[i] = NULL;
		for (j = 0; j < oldnvdevs; j++) {
			vd = oldvdevs[j];
			if (vd != NULL && guid == vd->vdev_guid) {
				/*
				 * Retain previous vdev for add/remove ops.
				 */
				newvdevs[i] = vd;
				oldvdevs[j] = NULL;
				break;
			}
		}

		if (newvdevs[i] == NULL) {
			/*
			 * Create new vdev
			 */
			VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
			    VDEV_ALLOC_L2CACHE) == 0);
			ASSERT(vd != NULL);
			newvdevs[i] = vd;

			/*
			 * Commit this vdev as an l2cache device,
			 * even if it fails to open.
			 */
			spa_l2cache_add(vd);

			vd->vdev_top = vd;
			vd->vdev_aux = sav;

			spa_l2cache_activate(vd);

			if (vdev_open(vd) != 0)
				continue;

			(void) vdev_validate_aux(vd);

			if (!vdev_is_dead(vd))
				l2arc_add_vdev(spa, vd);
		}
	}

	/*
	 * Purge vdevs that were dropped
	 */
	for (i = 0; i < oldnvdevs; i++) {
		uint64_t pool;

		vd = oldvdevs[i];
		if (vd != NULL) {
			ASSERT(vd->vdev_isl2cache);

			if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
			    pool != 0ULL && l2arc_vdev_present(vd))
				l2arc_remove_vdev(vd);
			vdev_clear_stats(vd);
			vdev_free(vd);
		}
	}

	if (oldvdevs)
		kmem_free(oldvdevs, oldnvdevs * sizeof (void *));

	if (sav->sav_config == NULL)
		goto out;

	sav->sav_vdevs = newvdevs;
	sav->sav_count = (int)nl2cache;

	/*
	 * Recompute the stashed list of l2cache devices, with status
	 * information this time.
	 */
	VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
	    DATA_TYPE_NVLIST_ARRAY) == 0);

	l2cache = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
	for (i = 0; i < sav->sav_count; i++)
		l2cache[i] = vdev_config_generate(spa,
		    sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
	VERIFY(nvlist_add_nvlist_array(sav->sav_config,
	    ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
	for (i = 0; i < sav->sav_count; i++)
		nvlist_free(l2cache[i]);
	if (sav->sav_count)
		kmem_free(l2cache, sav->sav_count * sizeof (void *));
}

static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
	dmu_buf_t *db;
	char *packed = NULL;
	size_t nvsize = 0;
	int error;
	*value = NULL;

	error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
	if (error != 0)
		return (error);
	nvsize = *(uint64_t *)db->db_data;
	dmu_buf_rele(db, FTAG);

	packed = kmem_alloc(nvsize, KM_SLEEP);
	error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
	    DMU_READ_PREFETCH);
	if (error == 0)
		error = nvlist_unpack(packed, nvsize, value, 0);
	kmem_free(packed, nvsize);

	return (error);
}
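/*
 * Note the storage convention load_nvlist() relies on: the packed nvlist
 * occupies the object's data blocks, while the bonus buffer holds a single
 * uint64_t giving the packed size.  The write side of this convention
 * lives in the nvlist sync code later in this file.
 */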
/*
 * Checks to see if the given vdev could not be opened, in which case we post a
 * sysevent to notify the autoreplace code that the device has been removed.
 */
static void
spa_check_removed(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		spa_check_removed(vd->vdev_child[c]);

	if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
	    !vd->vdev_ishole) {
		zfs_post_autoreplace(vd->vdev_spa, vd);
		spa_event_notify(vd->vdev_spa, vd, ESC_ZFS_VDEV_CHECK);
	}
}

/*
 * Validate the current config against the MOS config
 */
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
	vdev_t *mrvd, *rvd = spa->spa_root_vdev;
	nvlist_t *nv;

	VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);

	ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);

	/*
	 * If we're doing a normal import, then build up any additional
	 * diagnostic information about missing devices in this config.
	 * We'll pass this up to the user for further processing.
	 */
	if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
		nvlist_t **child, *nv;
		uint64_t idx = 0;

		child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t **),
		    KM_SLEEP);
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

		for (int c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			vdev_t *mtvd = mrvd->vdev_child[c];

			if (tvd->vdev_ops == &vdev_missing_ops &&
			    mtvd->vdev_ops != &vdev_missing_ops &&
			    mtvd->vdev_islog)
				child[idx++] = vdev_config_generate(spa, mtvd,
				    B_FALSE, 0);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
			VERIFY(nvlist_add_nvlist(spa->spa_load_info,
			    ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);

			for (int i = 0; i < idx; i++)
				nvlist_free(child[i]);
		}
		nvlist_free(nv);
		kmem_free(child, rvd->vdev_children * sizeof (char **));
	}

	/*
	 * Compare the root vdev tree with the information we have
	 * from the MOS config (mrvd).  Check each top-level vdev
	 * with the corresponding MOS config top-level (mtvd).
	 */
	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		vdev_t *mtvd = mrvd->vdev_child[c];

		/*
		 * Resolve any "missing" vdevs in the current configuration.
		 * If we find that the MOS config has more accurate information
		 * about the top-level vdev, then use that vdev instead.
		 */
		if (tvd->vdev_ops == &vdev_missing_ops &&
		    mtvd->vdev_ops != &vdev_missing_ops) {

			if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
				continue;

			/*
			 * Device specific actions.
			 */
			if (mtvd->vdev_islog) {
				spa_set_log_state(spa, SPA_LOG_CLEAR);
			} else {
				/*
				 * XXX - once we have 'readonly' pool
				 * support we should be able to handle
				 * missing data devices by transitioning
				 * the pool to readonly.
				 */
				continue;
			}

			/*
			 * Swap the missing vdev with the data we were
			 * able to obtain from the MOS config.
			 */
			vdev_remove_child(rvd, tvd);
			vdev_remove_child(mrvd, mtvd);

			vdev_add_child(rvd, mtvd);
			vdev_add_child(mrvd, tvd);

			spa_config_exit(spa, SCL_ALL, FTAG);
			vdev_load(mtvd);
			spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

			vdev_reopen(rvd);
		} else if (mtvd->vdev_islog) {
			/*
			 * Load the slog device's state from the MOS config
			 * since it's possible that the label does not
			 * contain the most up-to-date information.
			 */
			vdev_load_log_state(tvd, mtvd);
			vdev_reopen(tvd);
		}
	}
	vdev_free(mrvd);
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Ensure we were able to validate the config.
	 */
	return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}

/*
 * Check for missing log devices
 */
static boolean_t
spa_check_logs(spa_t *spa)
{
	boolean_t rv = B_FALSE;

	switch (spa->spa_log_state) {
	case SPA_LOG_MISSING:
		/* need to recheck in case slog has been restored */
	case SPA_LOG_UNKNOWN:
		rv = (dmu_objset_find(spa->spa_name, zil_check_log_chain,
		    NULL, DS_FIND_CHILDREN) != 0);
		if (rv)
			spa_set_log_state(spa, SPA_LOG_MISSING);
		break;
	}
	return (rv);
}

static boolean_t
spa_passivate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t slog_found = B_FALSE;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	if (!spa_has_slogs(spa))
		return (B_FALSE);

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog) {
			metaslab_group_passivate(mg);
			slog_found = B_TRUE;
		}
	}

	return (slog_found);
}

static void
spa_activate_log(spa_t *spa)
{
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));

	for (int c = 0; c < rvd->vdev_children; c++) {
		vdev_t *tvd = rvd->vdev_child[c];
		metaslab_group_t *mg = tvd->vdev_mg;

		if (tvd->vdev_islog)
			metaslab_group_activate(mg);
	}
}

int
spa_offline_log(spa_t *spa)
{
	int error;

	error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
	    NULL, DS_FIND_CHILDREN);
	if (error == 0) {
		/*
		 * We successfully offlined the log device, sync out the
		 * current txg so that the "stubby" block can be removed
		 * by zil_sync().
		 */
		txg_wait_synced(spa->spa_dsl_pool, 0);
	}
	return (error);
}

static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
	int i;

	for (i = 0; i < sav->sav_count; i++)
		spa_check_removed(sav->sav_vdevs[i]);
}

void
spa_claim_notify(zio_t *zio)
{
	spa_t *spa = zio->io_spa;

	if (zio->io_error)
		return;

	mutex_enter(&spa->spa_props_lock);	/* any mutex will do */
	if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
		spa->spa_claim_max_txg = zio->io_bp->blk_birth;
	mutex_exit(&spa->spa_props_lock);
}

typedef struct spa_load_error {
	uint64_t	sle_meta_count;
	uint64_t	sle_data_count;
} spa_load_error_t;

static void
spa_load_verify_done(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	spa_load_error_t *sle = zio->io_private;
	dmu_object_type_t type = BP_GET_TYPE(bp);
	int error = zio->io_error;
	spa_t *spa = zio->io_spa;

	if (error) {
		if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
		    type != DMU_OT_INTENT_LOG)
			atomic_add_64(&sle->sle_meta_count, 1);
		else
			atomic_add_64(&sle->sle_data_count, 1);
	}
	zio_data_buf_free(zio->io_data, zio->io_size);

	mutex_enter(&spa->spa_scrub_lock);
	spa->spa_scrub_inflight--;
	cv_broadcast(&spa->spa_scrub_io_cv);
	mutex_exit(&spa->spa_scrub_lock);
}

/*
 * Maximum number of concurrent scrub i/os to create while verifying
 * a pool while importing it.
 */
int spa_load_verify_maxinflight = 10000;
boolean_t spa_load_verify_metadata = B_TRUE;
boolean_t spa_load_verify_data = B_TRUE;

SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_maxinflight, CTLFLAG_RWTUN,
    &spa_load_verify_maxinflight, 0,
    "Maximum number of concurrent scrub I/Os to create while verifying a "
    "pool while importing it");

SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_metadata, CTLFLAG_RWTUN,
    &spa_load_verify_metadata, 0,
    "Check metadata on import?");

SYSCTL_INT(_vfs_zfs, OID_AUTO, spa_load_verify_data, CTLFLAG_RWTUN,
    &spa_load_verify_data, 0,
    "Check user data on import?");

/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
		return (0);
	/*
	 * Note: normally this routine will not be called if
	 * spa_load_verify_metadata is not set.  However, it may be useful
	 * to manually set the flag after the traversal has begun.
	 */
	if (!spa_load_verify_metadata)
		return (0);
	if (BP_GET_BUFC_TYPE(bp) == ARC_BUFC_DATA && !spa_load_verify_data)
		return (0);

	zio_t *rio = arg;
	size_t size = BP_GET_PSIZE(bp);
	void *data = zio_data_buf_alloc(size);

	mutex_enter(&spa->spa_scrub_lock);
	while (spa->spa_scrub_inflight >= spa_load_verify_maxinflight)
		cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
	spa->spa_scrub_inflight++;
	mutex_exit(&spa->spa_scrub_lock);

	zio_nowait(zio_read(rio, spa, bp, data, size,
	    spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
	    ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
	return (0);
}
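/*
 * The cv_wait() loop above is a simple counting throttle: the traversal
 * callback blocks once spa_load_verify_maxinflight reads are outstanding,
 * and spa_load_verify_done() signals spa_scrub_io_cv as each one
 * completes.
 */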
static int
spa_load_verify(spa_t *spa)
{
	zio_t *rio;
	spa_load_error_t sle = { 0 };
	zpool_rewind_policy_t policy;
	boolean_t verify_ok = B_FALSE;
	int error = 0;

	zpool_get_rewind_policy(spa->spa_config, &policy);

	if (policy.zrp_request & ZPOOL_NEVER_REWIND)
		return (0);

	rio = zio_root(spa, NULL, &sle,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);

	if (spa_load_verify_metadata) {
		error = traverse_pool(spa, spa->spa_verify_min_txg,
		    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
		    spa_load_verify_cb, rio);
	}

	(void) zio_wait(rio);

	spa->spa_load_meta_errors = sle.sle_meta_count;
	spa->spa_load_data_errors = sle.sle_data_count;

	if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
	    sle.sle_data_count <= policy.zrp_maxdata) {
		int64_t loss = 0;

		verify_ok = B_TRUE;
		spa->spa_load_txg = spa->spa_uberblock.ub_txg;
		spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;

		loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
		VERIFY(nvlist_add_int64(spa->spa_load_info,
		    ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
		VERIFY(nvlist_add_uint64(spa->spa_load_info,
		    ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
	} else {
		spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
	}

	if (error) {
		if (error != ENXIO && error != EIO)
			error = SET_ERROR(EIO);
		return (error);
	}

	return (verify_ok ? 0 : EIO);
}

/*
 * Find a value in the pool props object.
 */
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
	(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
	    zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}

/*
 * Find a value in the pool directory object.
 */
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
	return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    name, sizeof (uint64_t), 1, val));
}

static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
	vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
	return (err);
}

/*
 * Fix up config after a partly-completed split.  This is done with the
 * ZPOOL_CONFIG_SPLIT nvlist.  Both the splitting pool and the split-off
 * pool have that entry in their config, but only the splitting one contains
 * a list of all the guids of the vdevs that are being split off.
 *
 * This function determines what to do with that list: either rejoin
 * all the disks to the pool, or complete the splitting process.  To attempt
 * the rejoin, each disk that is offlined is marked online again, and
 * we do a reopen() call.  If the vdev label for every disk that was
 * marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
 * then we call vdev_split() on each disk, and complete the split.
 *
 * Otherwise we leave the config alone, with all the vdevs in place in
 * the original pool.
 */
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
	uint_t extracted;
	uint64_t *glist;
	uint_t i, gcount;
	nvlist_t *nvl;
	vdev_t **vd;
	boolean_t attempt_reopen;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
		return;

	/* check that the config is complete */
	if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
	    &glist, &gcount) != 0)
		return;

	vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);

	/* attempt to online all the vdevs & validate */
	attempt_reopen = B_TRUE;
	for (i = 0; i < gcount; i++) {
		if (glist[i] == 0)	/* vdev is hole */
			continue;

		vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
		if (vd[i] == NULL) {
			/*
			 * Don't bother attempting to reopen the disks;
			 * just do the split.
			 */
			attempt_reopen = B_FALSE;
		} else {
			/* attempt to re-online it */
			vd[i]->vdev_offline = B_FALSE;
		}
	}

	if (attempt_reopen) {
		vdev_reopen(spa->spa_root_vdev);

		/* check each device to see what state it's in */
		for (extracted = 0, i = 0; i < gcount; i++) {
			if (vd[i] != NULL &&
			    vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
				break;
			++extracted;
		}
	}

	/*
	 * If every disk has been moved to the new pool, or if we never
	 * even attempted to look at them, then we split them off for
	 * good.
	 */
2103 */ 2104 if (!attempt_reopen || gcount == extracted) { 2105 for (i = 0; i < gcount; i++) 2106 if (vd[i] != NULL) 2107 vdev_split(vd[i]); 2108 vdev_reopen(spa->spa_root_vdev); 2109 } 2110 2111 kmem_free(vd, gcount * sizeof (vdev_t *)); 2112} 2113 2114static int 2115spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type, 2116 boolean_t mosconfig) 2117{ 2118 nvlist_t *config = spa->spa_config; 2119 char *ereport = FM_EREPORT_ZFS_POOL; 2120 char *comment; 2121 int error; 2122 uint64_t pool_guid; 2123 nvlist_t *nvl; 2124 2125 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) 2126 return (SET_ERROR(EINVAL)); 2127 2128 ASSERT(spa->spa_comment == NULL); 2129 if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0) 2130 spa->spa_comment = spa_strdup(comment); 2131 2132 /* 2133 * Versioning wasn't explicitly added to the label until later, so if 2134 * it's not present treat it as the initial version. 2135 */ 2136 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 2137 &spa->spa_ubsync.ub_version) != 0) 2138 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 2139 2140 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, 2141 &spa->spa_config_txg); 2142 2143 if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) && 2144 spa_guid_exists(pool_guid, 0)) { 2145 error = SET_ERROR(EEXIST); 2146 } else { 2147 spa->spa_config_guid = pool_guid; 2148 2149 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, 2150 &nvl) == 0) { 2151 VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting, 2152 KM_SLEEP) == 0); 2153 } 2154 2155 nvlist_free(spa->spa_load_info); 2156 spa->spa_load_info = fnvlist_alloc(); 2157 2158 gethrestime(&spa->spa_loaded_ts); 2159 error = spa_load_impl(spa, pool_guid, config, state, type, 2160 mosconfig, &ereport); 2161 } 2162 2163 spa->spa_minref = refcount_count(&spa->spa_refcount); 2164 if (error) { 2165 if (error != EEXIST) { 2166 spa->spa_loaded_ts.tv_sec = 0; 2167 spa->spa_loaded_ts.tv_nsec = 0; 2168 } 2169 if (error != EBADF) { 2170 zfs_ereport_post(ereport, spa, NULL, NULL, 0, 0); 2171 } 2172 } 2173 spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE; 2174 spa->spa_ena = 0; 2175 2176 return (error); 2177} 2178 2179/* 2180 * Load an existing storage pool, using the pool's builtin spa_config as a 2181 * source of configuration information. 2182 */ 2183static int 2184spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config, 2185 spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig, 2186 char **ereport) 2187{ 2188 int error = 0; 2189 nvlist_t *nvroot = NULL; 2190 nvlist_t *label; 2191 vdev_t *rvd; 2192 uberblock_t *ub = &spa->spa_uberblock; 2193 uint64_t children, config_cache_txg = spa->spa_config_txg; 2194 int orig_mode = spa->spa_mode; 2195 int parse; 2196 uint64_t obj; 2197 boolean_t missing_feat_write = B_FALSE; 2198 2199 /* 2200 * If this is an untrusted config, access the pool in read-only mode. 2201 * This prevents things like resilvering recently removed devices. 2202 */ 2203 if (!mosconfig) 2204 spa->spa_mode = FREAD; 2205 2206 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 2207 2208 spa->spa_load_state = state; 2209 2210 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot)) 2211 return (SET_ERROR(EINVAL)); 2212 2213 parse = (type == SPA_IMPORT_EXISTING ? 
2214 VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT); 2215 2216 /* 2217 * Create "The Godfather" zio to hold all async IOs 2218 */ 2219 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 2220 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 2221 2222 /* 2223 * Parse the configuration into a vdev tree. We explicitly set the 2224 * value that will be returned by spa_version() since parsing the 2225 * configuration requires knowing the version number. 2226 */ 2227 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2228 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse); 2229 spa_config_exit(spa, SCL_ALL, FTAG); 2230 2231 if (error != 0) 2232 return (error); 2233 2234 ASSERT(spa->spa_root_vdev == rvd); 2235 2236 if (type != SPA_IMPORT_ASSEMBLE) { 2237 ASSERT(spa_guid(spa) == pool_guid); 2238 } 2239 2240 /* 2241 * Try to open all vdevs, loading each label in the process. 2242 */ 2243 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2244 error = vdev_open(rvd); 2245 spa_config_exit(spa, SCL_ALL, FTAG); 2246 if (error != 0) 2247 return (error); 2248 2249 /* 2250 * We need to validate the vdev labels against the configuration that 2251 * we have in hand, which is dependent on the setting of mosconfig. If 2252 * mosconfig is true then we're validating the vdev labels based on 2253 * that config. Otherwise, we're validating against the cached config 2254 * (zpool.cache) that was read when we loaded the zfs module, and then 2255 * later we will recursively call spa_load() and validate against 2256 * the vdev config. 2257 * 2258 * If we're assembling a new pool that's been split off from an 2259 * existing pool, the labels haven't yet been updated so we skip 2260 * validation for now. 2261 */ 2262 if (type != SPA_IMPORT_ASSEMBLE) { 2263 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2264 error = vdev_validate(rvd, mosconfig); 2265 spa_config_exit(spa, SCL_ALL, FTAG); 2266 2267 if (error != 0) 2268 return (error); 2269 2270 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2271 return (SET_ERROR(ENXIO)); 2272 } 2273 2274 /* 2275 * Find the best uberblock. 2276 */ 2277 vdev_uberblock_load(rvd, ub, &label); 2278 2279 /* 2280 * If we weren't able to find a single valid uberblock, return failure. 2281 */ 2282 if (ub->ub_txg == 0) { 2283 nvlist_free(label); 2284 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO)); 2285 } 2286 2287 /* 2288 * If the pool has an unsupported version we can't open it. 2289 */ 2290 if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) { 2291 nvlist_free(label); 2292 return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP)); 2293 } 2294 2295 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2296 nvlist_t *features; 2297 2298 /* 2299 * If we weren't able to find what's necessary for reading the 2300 * MOS in the label, return failure. 2301 */ 2302 if (label == NULL || nvlist_lookup_nvlist(label, 2303 ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) { 2304 nvlist_free(label); 2305 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2306 ENXIO)); 2307 } 2308 2309 /* 2310 * Update our in-core representation with the definitive values 2311 * from the label. 2312 */ 2313 nvlist_free(spa->spa_label_features); 2314 VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); 2315 } 2316 2317 nvlist_free(label); 2318 2319 /* 2320 * Look through entries in the label nvlist's features_for_read. If 2321 * there is a feature listed there which we don't understand then we 2322 * cannot open a pool. 
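 *
 * Illustrative sketch (not part of the original comment): the nvlist
 * posted below under ZPOOL_CONFIG_UNSUP_FEAT maps each unsupported
 * feature guid to an empty string, so a userland consumer holding that
 * nvlist (call it 'unsup') can report the names with an ordinary
 * nvpair walk:
 *
 *	for (nvpair_t *p = nvlist_next_nvpair(unsup, NULL); p != NULL;
 *	    p = nvlist_next_nvpair(unsup, p))
 *		(void) printf("\t%s\n", nvpair_name(p));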
2323 */ 2324 if (ub->ub_version >= SPA_VERSION_FEATURES) { 2325 nvlist_t *unsup_feat; 2326 2327 VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) == 2328 0); 2329 2330 for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features, 2331 NULL); nvp != NULL; 2332 nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) { 2333 if (!zfeature_is_supported(nvpair_name(nvp))) { 2334 VERIFY(nvlist_add_string(unsup_feat, 2335 nvpair_name(nvp), "") == 0); 2336 } 2337 } 2338 2339 if (!nvlist_empty(unsup_feat)) { 2340 VERIFY(nvlist_add_nvlist(spa->spa_load_info, 2341 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0); 2342 nvlist_free(unsup_feat); 2343 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2344 ENOTSUP)); 2345 } 2346 2347 nvlist_free(unsup_feat); 2348 } 2349 2350 /* 2351 * If the vdev guid sum doesn't match the uberblock, we have an 2352 * incomplete configuration. We first check to see if the pool 2353 * is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN). 2354 * If it is, defer the vdev_guid_sum check till later so we 2355 * can handle missing vdevs. 2356 */ 2357 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN, 2358 &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE && 2359 rvd->vdev_guid_sum != ub->ub_guid_sum) 2360 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO)); 2361 2362 if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) { 2363 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2364 spa_try_repair(spa, config); 2365 spa_config_exit(spa, SCL_ALL, FTAG); 2366 nvlist_free(spa->spa_config_splitting); 2367 spa->spa_config_splitting = NULL; 2368 } 2369 2370 /* 2371 * Initialize internal SPA structures. 2372 */ 2373 spa->spa_state = POOL_STATE_ACTIVE; 2374 spa->spa_ubsync = spa->spa_uberblock; 2375 spa->spa_verify_min_txg = spa->spa_extreme_rewind ? 2376 TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1; 2377 spa->spa_first_txg = spa->spa_last_ubsync_txg ? 
2378 spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1; 2379 spa->spa_claim_max_txg = spa->spa_first_txg; 2380 spa->spa_prev_software_version = ub->ub_software_version; 2381 2382 error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool); 2383 if (error) 2384 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2385 spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset; 2386 2387 if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0) 2388 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2389 2390 if (spa_version(spa) >= SPA_VERSION_FEATURES) { 2391 boolean_t missing_feat_read = B_FALSE; 2392 nvlist_t *unsup_feat, *enabled_feat; 2393 2394 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ, 2395 &spa->spa_feat_for_read_obj) != 0) { 2396 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2397 } 2398 2399 if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE, 2400 &spa->spa_feat_for_write_obj) != 0) { 2401 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2402 } 2403 2404 if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS, 2405 &spa->spa_feat_desc_obj) != 0) { 2406 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2407 } 2408 2409 enabled_feat = fnvlist_alloc(); 2410 unsup_feat = fnvlist_alloc(); 2411 2412 if (!spa_features_check(spa, B_FALSE, 2413 unsup_feat, enabled_feat)) 2414 missing_feat_read = B_TRUE; 2415 2416 if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) { 2417 if (!spa_features_check(spa, B_TRUE, 2418 unsup_feat, enabled_feat)) { 2419 missing_feat_write = B_TRUE; 2420 } 2421 } 2422 2423 fnvlist_add_nvlist(spa->spa_load_info, 2424 ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat); 2425 2426 if (!nvlist_empty(unsup_feat)) { 2427 fnvlist_add_nvlist(spa->spa_load_info, 2428 ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat); 2429 } 2430 2431 fnvlist_free(enabled_feat); 2432 fnvlist_free(unsup_feat); 2433 2434 if (!missing_feat_read) { 2435 fnvlist_add_boolean(spa->spa_load_info, 2436 ZPOOL_CONFIG_CAN_RDONLY); 2437 } 2438 2439 /* 2440 * If the state is SPA_LOAD_TRYIMPORT, our objective is 2441 * twofold: to determine whether the pool is available for 2442 * import in read-write mode and (if it is not) whether the 2443 * pool is available for import in read-only mode. If the pool 2444 * is available for import in read-write mode, it is displayed 2445 * as available in userland; if it is not available for import 2446 * in read-only mode, it is displayed as unavailable in 2447 * userland. If the pool is available for import in read-only 2448 * mode but not read-write mode, it is displayed as unavailable 2449 * in userland with a special note that the pool is actually 2450 * available for open in read-only mode. 2451 * 2452 * As a result, if the state is SPA_LOAD_TRYIMPORT and we are 2453 * missing a feature for write, we must first determine whether 2454 * the pool can be opened read-only before returning to 2455 * userland in order to know whether to display the 2456 * abovementioned note. 2457 */ 2458 if (missing_feat_read || (missing_feat_write && 2459 spa_writeable(spa))) { 2460 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, 2461 ENOTSUP)); 2462 } 2463 2464 /* 2465 * Load refcounts for ZFS features from disk into an in-memory 2466 * cache during SPA initialization. 
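 *
 * Illustrative sketch (not part of the original source): once the cache
 * is populated, feature-state checks are cheap; a typical consumer only
 * needs the boolean wrappers, e.g.
 *
 *	boolean_t busy = spa_feature_is_active(spa,
 *	    SPA_FEATURE_ASYNC_DESTROY);
 *
 * which is answered out of spa_feat_refcount_cache[] rather than by a
 * ZAP lookup.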
2467 */ 2468 for (spa_feature_t i = 0; i < SPA_FEATURES; i++) { 2469 uint64_t refcount; 2470 2471 error = feature_get_refcount_from_disk(spa, 2472 &spa_feature_table[i], &refcount); 2473 if (error == 0) { 2474 spa->spa_feat_refcount_cache[i] = refcount; 2475 } else if (error == ENOTSUP) { 2476 spa->spa_feat_refcount_cache[i] = 2477 SPA_FEATURE_DISABLED; 2478 } else { 2479 return (spa_vdev_err(rvd, 2480 VDEV_AUX_CORRUPT_DATA, EIO)); 2481 } 2482 } 2483 } 2484 2485 if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) { 2486 if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG, 2487 &spa->spa_feat_enabled_txg_obj) != 0) 2488 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2489 } 2490 2491 spa->spa_is_initializing = B_TRUE; 2492 error = dsl_pool_open(spa->spa_dsl_pool); 2493 spa->spa_is_initializing = B_FALSE; 2494 if (error != 0) 2495 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2496 2497 if (!mosconfig) { 2498 uint64_t hostid; 2499 nvlist_t *policy = NULL, *nvconfig; 2500 2501 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2502 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2503 2504 if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig, 2505 ZPOOL_CONFIG_HOSTID, &hostid) == 0) { 2506 char *hostname; 2507 unsigned long myhostid = 0; 2508 2509 VERIFY(nvlist_lookup_string(nvconfig, 2510 ZPOOL_CONFIG_HOSTNAME, &hostname) == 0); 2511 2512#ifdef _KERNEL 2513 myhostid = zone_get_hostid(NULL); 2514#else /* _KERNEL */ 2515 /* 2516 * We're emulating the system's hostid in userland, so 2517 * we can't use zone_get_hostid(). 2518 */ 2519 (void) ddi_strtoul(hw_serial, NULL, 10, &myhostid); 2520#endif /* _KERNEL */ 2521 if (check_hostid && hostid != 0 && myhostid != 0 && 2522 hostid != myhostid) { 2523 nvlist_free(nvconfig); 2524 cmn_err(CE_WARN, "pool '%s' could not be " 2525 "loaded as it was last accessed by " 2526 "another system (host: %s hostid: 0x%lx). " 2527 "See: http://illumos.org/msg/ZFS-8000-EY", 2528 spa_name(spa), hostname, 2529 (unsigned long)hostid); 2530 return (SET_ERROR(EBADF)); 2531 } 2532 } 2533 if (nvlist_lookup_nvlist(spa->spa_config, 2534 ZPOOL_REWIND_POLICY, &policy) == 0) 2535 VERIFY(nvlist_add_nvlist(nvconfig, 2536 ZPOOL_REWIND_POLICY, policy) == 0); 2537 2538 spa_config_set(spa, nvconfig); 2539 spa_unload(spa); 2540 spa_deactivate(spa); 2541 spa_activate(spa, orig_mode); 2542 2543 return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE)); 2544 } 2545 2546 if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0) 2547 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2548 error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj); 2549 if (error != 0) 2550 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2551 2552 /* 2553 * Load the bit that tells us to use the new accounting function 2554 * (raid-z deflation). If we have an older pool, this will not 2555 * be present. 2556 */ 2557 error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate); 2558 if (error != 0 && error != ENOENT) 2559 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2560 2561 error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION, 2562 &spa->spa_creation_version); 2563 if (error != 0 && error != ENOENT) 2564 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2565 2566 /* 2567 * Load the persistent error log. If we have an older pool, this will 2568 * not be present. 
2569 */ 2570 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last); 2571 if (error != 0 && error != ENOENT) 2572 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2573 2574 error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB, 2575 &spa->spa_errlog_scrub); 2576 if (error != 0 && error != ENOENT) 2577 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2578 2579 /* 2580 * Load the history object. If we have an older pool, this 2581 * will not be present. 2582 */ 2583 error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history); 2584 if (error != 0 && error != ENOENT) 2585 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2586 2587 /* 2588 * If we're assembling the pool from the split-off vdevs of 2589 * an existing pool, we don't want to attach the spares & cache 2590 * devices. 2591 */ 2592 2593 /* 2594 * Load any hot spares for this pool. 2595 */ 2596 error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object); 2597 if (error != 0 && error != ENOENT) 2598 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2599 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2600 ASSERT(spa_version(spa) >= SPA_VERSION_SPARES); 2601 if (load_nvlist(spa, spa->spa_spares.sav_object, 2602 &spa->spa_spares.sav_config) != 0) 2603 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2604 2605 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2606 spa_load_spares(spa); 2607 spa_config_exit(spa, SCL_ALL, FTAG); 2608 } else if (error == 0) { 2609 spa->spa_spares.sav_sync = B_TRUE; 2610 } 2611 2612 /* 2613 * Load any level 2 ARC devices for this pool. 2614 */ 2615 error = spa_dir_prop(spa, DMU_POOL_L2CACHE, 2616 &spa->spa_l2cache.sav_object); 2617 if (error != 0 && error != ENOENT) 2618 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2619 if (error == 0 && type != SPA_IMPORT_ASSEMBLE) { 2620 ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE); 2621 if (load_nvlist(spa, spa->spa_l2cache.sav_object, 2622 &spa->spa_l2cache.sav_config) != 0) 2623 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2624 2625 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2626 spa_load_l2cache(spa); 2627 spa_config_exit(spa, SCL_ALL, FTAG); 2628 } else if (error == 0) { 2629 spa->spa_l2cache.sav_sync = B_TRUE; 2630 } 2631 2632 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 2633 2634 error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object); 2635 if (error && error != ENOENT) 2636 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2637 2638 if (error == 0) { 2639 uint64_t autoreplace; 2640 2641 spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs); 2642 spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace); 2643 spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation); 2644 spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode); 2645 spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand); 2646 spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO, 2647 &spa->spa_dedup_ditto); 2648 2649 spa->spa_autoreplace = (autoreplace != 0); 2650 } 2651 2652 /* 2653 * If the 'autoreplace' property is set, then post a resource notifying 2654 * the ZFS DE that it should not issue any faults for unopenable 2655 * devices. We also iterate over the vdevs, and post a sysevent for any 2656 * unopenable vdevs so that the normal autoreplace handler can take 2657 * over. 
2658 */ 2659 if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) { 2660 spa_check_removed(spa->spa_root_vdev); 2661 /* 2662 * For the import case, this is done in spa_import(), because 2663 * at this point we're using the spare definitions from 2664 * the MOS config, not necessarily from the userland config. 2665 */ 2666 if (state != SPA_LOAD_IMPORT) { 2667 spa_aux_check_removed(&spa->spa_spares); 2668 spa_aux_check_removed(&spa->spa_l2cache); 2669 } 2670 } 2671 2672 /* 2673 * Load the vdev state for all toplevel vdevs. 2674 */ 2675 vdev_load(rvd); 2676 2677 /* 2678 * Propagate the leaf DTLs we just loaded all the way up the tree. 2679 */ 2680 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 2681 vdev_dtl_reassess(rvd, 0, 0, B_FALSE); 2682 spa_config_exit(spa, SCL_ALL, FTAG); 2683 2684 /* 2685 * Load the DDTs (dedup tables). 2686 */ 2687 error = ddt_load(spa); 2688 if (error != 0) 2689 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2690 2691 spa_update_dspace(spa); 2692 2693 /* 2694 * Validate the config, using the MOS config to fill in any 2695 * information which might be missing. If we fail to validate 2696 * the config then declare the pool unfit for use. If we're 2697 * assembling a pool from a split, the log is not transferred 2698 * over. 2699 */ 2700 if (type != SPA_IMPORT_ASSEMBLE) { 2701 nvlist_t *nvconfig; 2702 2703 if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0) 2704 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO)); 2705 2706 if (!spa_config_valid(spa, nvconfig)) { 2707 nvlist_free(nvconfig); 2708 return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, 2709 ENXIO)); 2710 } 2711 nvlist_free(nvconfig); 2712 2713 /* 2714 * Now that we've validated the config, check the state of the 2715 * root vdev. If it can't be opened, it indicates one or 2716 * more toplevel vdevs are faulted. 2717 */ 2718 if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) 2719 return (SET_ERROR(ENXIO)); 2720 2721 if (spa_check_logs(spa)) { 2722 *ereport = FM_EREPORT_ZFS_LOG_REPLAY; 2723 return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO)); 2724 } 2725 } 2726 2727 if (missing_feat_write) { 2728 ASSERT(state == SPA_LOAD_TRYIMPORT); 2729 2730 /* 2731 * At this point, we know that we can open the pool in 2732 * read-only mode but not read-write mode. We now have enough 2733 * information and can return to userland. 2734 */ 2735 return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP)); 2736 } 2737 2738 /* 2739 * We've successfully opened the pool, verify that we're ready 2740 * to start pushing transactions. 2741 */ 2742 if (state != SPA_LOAD_TRYIMPORT) { 2743 if (error = spa_load_verify(spa)) 2744 return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, 2745 error)); 2746 } 2747 2748 if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER || 2749 spa->spa_load_max_txg == UINT64_MAX)) { 2750 dmu_tx_t *tx; 2751 int need_update = B_FALSE; 2752 2753 ASSERT(state != SPA_LOAD_TRYIMPORT); 2754 2755 /* 2756 * Claim log blocks that haven't been committed yet. 2757 * This must all happen in a single txg. 2758 * Note: spa_claim_max_txg is updated by spa_claim_notify(), 2759 * invoked from zil_claim_log_block()'s i/o done callback. 2760 * Price of rollback is that we abandon the log. 
2761 */ 2762 spa->spa_claiming = B_TRUE; 2763 2764 tx = dmu_tx_create_assigned(spa_get_dsl(spa), 2765 spa_first_txg(spa)); 2766 (void) dmu_objset_find(spa_name(spa), 2767 zil_claim, tx, DS_FIND_CHILDREN); 2768 dmu_tx_commit(tx); 2769 2770 spa->spa_claiming = B_FALSE; 2771 2772 spa_set_log_state(spa, SPA_LOG_GOOD); 2773 spa->spa_sync_on = B_TRUE; 2774 txg_sync_start(spa->spa_dsl_pool); 2775 2776 /* 2777 * Wait for all claims to sync. We sync up to the highest 2778 * claimed log block birth time so that claimed log blocks 2779 * don't appear to be from the future. spa_claim_max_txg 2780 * will have been set for us by either zil_check_log_chain() 2781 * (invoked from spa_check_logs()) or zil_claim() above. 2782 */ 2783 txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg); 2784 2785 /* 2786 * If the config cache is stale, or we have uninitialized 2787 * metaslabs (see spa_vdev_add()), then update the config. 2788 * 2789 * If this is a verbatim import, trust the current 2790 * in-core spa_config and update the disk labels. 2791 */ 2792 if (config_cache_txg != spa->spa_config_txg || 2793 state == SPA_LOAD_IMPORT || 2794 state == SPA_LOAD_RECOVER || 2795 (spa->spa_import_flags & ZFS_IMPORT_VERBATIM)) 2796 need_update = B_TRUE; 2797 2798 for (int c = 0; c < rvd->vdev_children; c++) 2799 if (rvd->vdev_child[c]->vdev_ms_array == 0) 2800 need_update = B_TRUE; 2801 2802 /* 2803 * Update the config cache asynchronously in case we're the 2804 * root pool, in which case the config cache isn't writable yet. 2805 */ 2806 if (need_update) 2807 spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE); 2808 2809 /* 2810 * Check all DTLs to see if anything needs resilvering. 2811 */ 2812 if (!dsl_scan_resilvering(spa->spa_dsl_pool) && 2813 vdev_resilver_needed(rvd, NULL, NULL)) 2814 spa_async_request(spa, SPA_ASYNC_RESILVER); 2815 2816 /* 2817 * Log the fact that we booted up (so that we can detect if 2818 * we rebooted in the middle of an operation). 2819 */ 2820 spa_history_log_version(spa, "open"); 2821 2822 /* 2823 * Delete any inconsistent datasets. 2824 */ 2825 (void) dmu_objset_find(spa_name(spa), 2826 dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN); 2827 2828 /* 2829 * Clean up any stale temporary dataset userrefs. 2830 */ 2831 dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool); 2832 } 2833 2834 return (0); 2835} 2836 2837static int 2838spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig) 2839{ 2840 int mode = spa->spa_mode; 2841 2842 spa_unload(spa); 2843 spa_deactivate(spa); 2844 2845 spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1; 2846 2847 spa_activate(spa, mode); 2848 spa_async_suspend(spa); 2849 2850 return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); 2851} 2852 2853/* 2854 * If spa_load() fails this function will try loading prior txgs. If 2855 * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool 2856 * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this 2857 * function will not rewind the pool and will return the same error as 2858 * spa_load(). 
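 *
 * Worked example (illustrative, assuming TXG_DEFER_SIZE == 2): if the
 * pool's newest uberblock has txg 100, then spa_last_ubsync_txg is 100
 * and safe_rewind_txg is 98, so a SPA_LOAD_RECOVER import retries txgs
 * 100, 99 and 98 before giving up; ZPOOL_EXTREME_REWIND lowers the
 * floor all the way to TXG_INITIAL.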
2859 */ 2860static int 2861spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig, 2862 uint64_t max_request, int rewind_flags) 2863{ 2864 nvlist_t *loadinfo = NULL; 2865 nvlist_t *config = NULL; 2866 int load_error, rewind_error; 2867 uint64_t safe_rewind_txg; 2868 uint64_t min_txg; 2869 2870 if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) { 2871 spa->spa_load_max_txg = spa->spa_load_txg; 2872 spa_set_log_state(spa, SPA_LOG_CLEAR); 2873 } else { 2874 spa->spa_load_max_txg = max_request; 2875 if (max_request != UINT64_MAX) 2876 spa->spa_extreme_rewind = B_TRUE; 2877 } 2878 2879 load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING, 2880 mosconfig); 2881 if (load_error == 0) 2882 return (0); 2883 2884 if (spa->spa_root_vdev != NULL) 2885 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 2886 2887 spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg; 2888 spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp; 2889 2890 if (rewind_flags & ZPOOL_NEVER_REWIND) { 2891 nvlist_free(config); 2892 return (load_error); 2893 } 2894 2895 if (state == SPA_LOAD_RECOVER) { 2896 /* Price of rolling back is discarding txgs, including log */ 2897 spa_set_log_state(spa, SPA_LOG_CLEAR); 2898 } else { 2899 /* 2900 * If we aren't rolling back save the load info from our first 2901 * import attempt so that we can restore it after attempting 2902 * to rewind. 2903 */ 2904 loadinfo = spa->spa_load_info; 2905 spa->spa_load_info = fnvlist_alloc(); 2906 } 2907 2908 spa->spa_load_max_txg = spa->spa_last_ubsync_txg; 2909 safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE; 2910 min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ? 2911 TXG_INITIAL : safe_rewind_txg; 2912 2913 /* 2914 * Continue as long as we're finding errors, we're still within 2915 * the acceptable rewind range, and we're still finding uberblocks 2916 */ 2917 while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg && 2918 spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) { 2919 if (spa->spa_load_max_txg < safe_rewind_txg) 2920 spa->spa_extreme_rewind = B_TRUE; 2921 rewind_error = spa_load_retry(spa, state, mosconfig); 2922 } 2923 2924 spa->spa_extreme_rewind = B_FALSE; 2925 spa->spa_load_max_txg = UINT64_MAX; 2926 2927 if (config && (rewind_error || state != SPA_LOAD_RECOVER)) 2928 spa_config_set(spa, config); 2929 2930 if (state == SPA_LOAD_RECOVER) { 2931 ASSERT3P(loadinfo, ==, NULL); 2932 return (rewind_error); 2933 } else { 2934 /* Store the rewind info as part of the initial load info */ 2935 fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO, 2936 spa->spa_load_info); 2937 2938 /* Restore the initial load info */ 2939 fnvlist_free(spa->spa_load_info); 2940 spa->spa_load_info = loadinfo; 2941 2942 return (load_error); 2943 } 2944} 2945 2946/* 2947 * Pool Open/Import 2948 * 2949 * The import case is identical to an open except that the configuration is sent 2950 * down from userland, instead of being grabbed from the configuration cache. For 2951 * the case of an open, the pool configuration will exist in the 2952 * POOL_STATE_UNINITIALIZED state. 2953 * 2954 * The stats information (gen/count/ustats) is used to gather vdev statistics 2955 * while opening the pool, without having to keep around the spa_t in some 2956 * ambiguous state. 
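 *
 * Illustrative sketch (not part of the original comment; "tank" is a
 * made-up pool name): consumers bracket their use of the pool with an
 * open/close pair,
 *
 *	spa_t *spa;
 *	int error;
 *
 *	error = spa_open("tank", &spa, FTAG);
 *	if (error == 0) {
 *		... operate on the pool ...
 *		spa_close(spa, FTAG);
 *	}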
2957 */ 2958static int 2959spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy, 2960 nvlist_t **config) 2961{ 2962 spa_t *spa; 2963 spa_load_state_t state = SPA_LOAD_OPEN; 2964 int error; 2965 int locked = B_FALSE; 2966 int firstopen = B_FALSE; 2967 2968 *spapp = NULL; 2969 2970 /* 2971 * As disgusting as this is, we need to support recursive calls to this 2972 * function because dsl_dir_open() is called during spa_load(), and ends 2973 * up calling spa_open() again. The real fix is to figure out how to 2974 * avoid dsl_dir_open() calling this in the first place. 2975 */ 2976 if (mutex_owner(&spa_namespace_lock) != curthread) { 2977 mutex_enter(&spa_namespace_lock); 2978 locked = B_TRUE; 2979 } 2980 2981 if ((spa = spa_lookup(pool)) == NULL) { 2982 if (locked) 2983 mutex_exit(&spa_namespace_lock); 2984 return (SET_ERROR(ENOENT)); 2985 } 2986 2987 if (spa->spa_state == POOL_STATE_UNINITIALIZED) { 2988 zpool_rewind_policy_t policy; 2989 2990 firstopen = B_TRUE; 2991 2992 zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config, 2993 &policy); 2994 if (policy.zrp_request & ZPOOL_DO_REWIND) 2995 state = SPA_LOAD_RECOVER; 2996 2997 spa_activate(spa, spa_mode_global); 2998 2999 if (state != SPA_LOAD_RECOVER) 3000 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 3001 3002 error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg, 3003 policy.zrp_request); 3004 3005 if (error == EBADF) { 3006 /* 3007 * If vdev_validate() returns failure (indicated by 3008 * EBADF), it means that one of the vdevs reports that 3009 * the pool has been exported or destroyed. If 3010 * this is the case, the config cache is out of sync and 3011 * we should remove the pool from the namespace. 3012 */ 3013 spa_unload(spa); 3014 spa_deactivate(spa); 3015 spa_config_sync(spa, B_TRUE, B_TRUE); 3016 spa_remove(spa); 3017 if (locked) 3018 mutex_exit(&spa_namespace_lock); 3019 return (SET_ERROR(ENOENT)); 3020 } 3021 3022 if (error) { 3023 /* 3024 * We can't open the pool, but we still have useful 3025 * information: the state of each vdev after the 3026 * attempted vdev_open(). Return this to the user. 3027 */ 3028 if (config != NULL && spa->spa_config) { 3029 VERIFY(nvlist_dup(spa->spa_config, config, 3030 KM_SLEEP) == 0); 3031 VERIFY(nvlist_add_nvlist(*config, 3032 ZPOOL_CONFIG_LOAD_INFO, 3033 spa->spa_load_info) == 0); 3034 } 3035 spa_unload(spa); 3036 spa_deactivate(spa); 3037 spa->spa_last_open_failed = error; 3038 if (locked) 3039 mutex_exit(&spa_namespace_lock); 3040 *spapp = NULL; 3041 return (error); 3042 } 3043 } 3044 3045 spa_open_ref(spa, tag); 3046 3047 if (config != NULL) 3048 *config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 3049 3050 /* 3051 * If we've recovered the pool, pass back any information we 3052 * gathered while doing the load. 
3053 */ 3054 if (state == SPA_LOAD_RECOVER) { 3055 VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO, 3056 spa->spa_load_info) == 0); 3057 } 3058 3059 if (locked) { 3060 spa->spa_last_open_failed = 0; 3061 spa->spa_last_ubsync_txg = 0; 3062 spa->spa_load_txg = 0; 3063 mutex_exit(&spa_namespace_lock); 3064#ifdef __FreeBSD__ 3065#ifdef _KERNEL 3066 if (firstopen) 3067 zvol_create_minors(spa->spa_name); 3068#endif 3069#endif 3070 } 3071 3072 *spapp = spa; 3073 3074 return (0); 3075} 3076 3077int 3078spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy, 3079 nvlist_t **config) 3080{ 3081 return (spa_open_common(name, spapp, tag, policy, config)); 3082} 3083 3084int 3085spa_open(const char *name, spa_t **spapp, void *tag) 3086{ 3087 return (spa_open_common(name, spapp, tag, NULL, NULL)); 3088} 3089 3090/* 3091 * Lookup the given spa_t, incrementing the inject count in the process, 3092 * preventing it from being exported or destroyed. 3093 */ 3094spa_t * 3095spa_inject_addref(char *name) 3096{ 3097 spa_t *spa; 3098 3099 mutex_enter(&spa_namespace_lock); 3100 if ((spa = spa_lookup(name)) == NULL) { 3101 mutex_exit(&spa_namespace_lock); 3102 return (NULL); 3103 } 3104 spa->spa_inject_ref++; 3105 mutex_exit(&spa_namespace_lock); 3106 3107 return (spa); 3108} 3109 3110void 3111spa_inject_delref(spa_t *spa) 3112{ 3113 mutex_enter(&spa_namespace_lock); 3114 spa->spa_inject_ref--; 3115 mutex_exit(&spa_namespace_lock); 3116} 3117 3118/* 3119 * Add spares device information to the nvlist. 3120 */ 3121static void 3122spa_add_spares(spa_t *spa, nvlist_t *config) 3123{ 3124 nvlist_t **spares; 3125 uint_t i, nspares; 3126 nvlist_t *nvroot; 3127 uint64_t guid; 3128 vdev_stat_t *vs; 3129 uint_t vsc; 3130 uint64_t pool; 3131 3132 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3133 3134 if (spa->spa_spares.sav_count == 0) 3135 return; 3136 3137 VERIFY(nvlist_lookup_nvlist(config, 3138 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3139 VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 3140 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3141 if (nspares != 0) { 3142 VERIFY(nvlist_add_nvlist_array(nvroot, 3143 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3144 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3145 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0); 3146 3147 /* 3148 * Go through and find any spares which have since been 3149 * repurposed as an active spare. If this is the case, update 3150 * their status appropriately. 3151 */ 3152 for (i = 0; i < nspares; i++) { 3153 VERIFY(nvlist_lookup_uint64(spares[i], 3154 ZPOOL_CONFIG_GUID, &guid) == 0); 3155 if (spa_spare_exists(guid, &pool, NULL) && 3156 pool != 0ULL) { 3157 VERIFY(nvlist_lookup_uint64_array( 3158 spares[i], ZPOOL_CONFIG_VDEV_STATS, 3159 (uint64_t **)&vs, &vsc) == 0); 3160 vs->vs_state = VDEV_STATE_CANT_OPEN; 3161 vs->vs_aux = VDEV_AUX_SPARED; 3162 } 3163 } 3164 } 3165} 3166 3167/* 3168 * Add l2cache device information to the nvlist, including vdev stats. 
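 *
 * Illustrative note (summary, not original text): the result mirrors
 * what spa_add_spares() builds above, i.e. the config's vdev tree gains
 * an l2cache nvlist array with one entry per cache device, each entry
 * carrying freshly refreshed ZPOOL_CONFIG_VDEV_STATS from
 * vdev_get_stats().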
3169 */ 3170static void 3171spa_add_l2cache(spa_t *spa, nvlist_t *config) 3172{ 3173 nvlist_t **l2cache; 3174 uint_t i, j, nl2cache; 3175 nvlist_t *nvroot; 3176 uint64_t guid; 3177 vdev_t *vd; 3178 vdev_stat_t *vs; 3179 uint_t vsc; 3180 3181 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3182 3183 if (spa->spa_l2cache.sav_count == 0) 3184 return; 3185 3186 VERIFY(nvlist_lookup_nvlist(config, 3187 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0); 3188 VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 3189 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3190 if (nl2cache != 0) { 3191 VERIFY(nvlist_add_nvlist_array(nvroot, 3192 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3193 VERIFY(nvlist_lookup_nvlist_array(nvroot, 3194 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0); 3195 3196 /* 3197 * Update level 2 cache device stats. 3198 */ 3199 3200 for (i = 0; i < nl2cache; i++) { 3201 VERIFY(nvlist_lookup_uint64(l2cache[i], 3202 ZPOOL_CONFIG_GUID, &guid) == 0); 3203 3204 vd = NULL; 3205 for (j = 0; j < spa->spa_l2cache.sav_count; j++) { 3206 if (guid == 3207 spa->spa_l2cache.sav_vdevs[j]->vdev_guid) { 3208 vd = spa->spa_l2cache.sav_vdevs[j]; 3209 break; 3210 } 3211 } 3212 ASSERT(vd != NULL); 3213 3214 VERIFY(nvlist_lookup_uint64_array(l2cache[i], 3215 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc) 3216 == 0); 3217 vdev_get_stats(vd, vs); 3218 } 3219 } 3220} 3221 3222static void 3223spa_add_feature_stats(spa_t *spa, nvlist_t *config) 3224{ 3225 nvlist_t *features; 3226 zap_cursor_t zc; 3227 zap_attribute_t za; 3228 3229 ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER)); 3230 VERIFY(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3231 3232 /* We may be unable to read features if pool is suspended. */ 3233 if (spa_suspended(spa)) 3234 goto out; 3235 3236 if (spa->spa_feat_for_read_obj != 0) { 3237 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3238 spa->spa_feat_for_read_obj); 3239 zap_cursor_retrieve(&zc, &za) == 0; 3240 zap_cursor_advance(&zc)) { 3241 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3242 za.za_num_integers == 1); 3243 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3244 za.za_first_integer)); 3245 } 3246 zap_cursor_fini(&zc); 3247 } 3248 3249 if (spa->spa_feat_for_write_obj != 0) { 3250 for (zap_cursor_init(&zc, spa->spa_meta_objset, 3251 spa->spa_feat_for_write_obj); 3252 zap_cursor_retrieve(&zc, &za) == 0; 3253 zap_cursor_advance(&zc)) { 3254 ASSERT(za.za_integer_length == sizeof (uint64_t) && 3255 za.za_num_integers == 1); 3256 VERIFY3U(0, ==, nvlist_add_uint64(features, za.za_name, 3257 za.za_first_integer)); 3258 } 3259 zap_cursor_fini(&zc); 3260 } 3261 3262out: 3263 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, 3264 features) == 0); 3265 nvlist_free(features); 3266} 3267 3268int 3269spa_get_stats(const char *name, nvlist_t **config, 3270 char *altroot, size_t buflen) 3271{ 3272 int error; 3273 spa_t *spa; 3274 3275 *config = NULL; 3276 error = spa_open_common(name, &spa, FTAG, NULL, config); 3277 3278 if (spa != NULL) { 3279 /* 3280 * This still leaves a window of inconsistency where the spares 3281 * or l2cache devices could change and the config would be 3282 * self-inconsistent. 
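 *
 * Illustrative sketch (hypothetical caller; "tank" is made up): note
 * that spa_get_stats() may hand back a config nvlist even when it
 * returns an error, so callers free it unconditionally:
 *
 *	nvlist_t *config;
 *	char altroot[MAXPATHLEN];
 *	int error;
 *
 *	error = spa_get_stats("tank", &config, altroot, sizeof (altroot));
 *	if (config != NULL)
 *		nvlist_free(config);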
3283 */ 3284 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 3285 3286 if (*config != NULL) { 3287 uint64_t loadtimes[2]; 3288 3289 loadtimes[0] = spa->spa_loaded_ts.tv_sec; 3290 loadtimes[1] = spa->spa_loaded_ts.tv_nsec; 3291 VERIFY(nvlist_add_uint64_array(*config, 3292 ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0); 3293 3294 VERIFY(nvlist_add_uint64(*config, 3295 ZPOOL_CONFIG_ERRCOUNT, 3296 spa_get_errlog_size(spa)) == 0); 3297 3298 if (spa_suspended(spa)) 3299 VERIFY(nvlist_add_uint64(*config, 3300 ZPOOL_CONFIG_SUSPENDED, 3301 spa->spa_failmode) == 0); 3302 3303 spa_add_spares(spa, *config); 3304 spa_add_l2cache(spa, *config); 3305 spa_add_feature_stats(spa, *config); 3306 } 3307 } 3308 3309 /* 3310 * We want to get the alternate root even for faulted pools, so we cheat 3311 * and call spa_lookup() directly. 3312 */ 3313 if (altroot) { 3314 if (spa == NULL) { 3315 mutex_enter(&spa_namespace_lock); 3316 spa = spa_lookup(name); 3317 if (spa) 3318 spa_altroot(spa, altroot, buflen); 3319 else 3320 altroot[0] = '\0'; 3321 spa = NULL; 3322 mutex_exit(&spa_namespace_lock); 3323 } else { 3324 spa_altroot(spa, altroot, buflen); 3325 } 3326 } 3327 3328 if (spa != NULL) { 3329 spa_config_exit(spa, SCL_CONFIG, FTAG); 3330 spa_close(spa, FTAG); 3331 } 3332 3333 return (error); 3334} 3335 3336/* 3337 * Validate that the auxiliary device array is well formed. We must have an 3338 * array of nvlists, each of which describes a valid leaf vdev. If this is an 3339 * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be 3340 * specified, as long as they are well-formed. 3341 */ 3342static int 3343spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode, 3344 spa_aux_vdev_t *sav, const char *config, uint64_t version, 3345 vdev_labeltype_t label) 3346{ 3347 nvlist_t **dev; 3348 uint_t i, ndev; 3349 vdev_t *vd; 3350 int error; 3351 3352 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3353 3354 /* 3355 * It's acceptable to have no devs specified. 3356 */ 3357 if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0) 3358 return (0); 3359 3360 if (ndev == 0) 3361 return (SET_ERROR(EINVAL)); 3362 3363 /* 3364 * Make sure the pool is formatted with a version that supports this 3365 * device type. 3366 */ 3367 if (spa_version(spa) < version) 3368 return (SET_ERROR(ENOTSUP)); 3369 3370 /* 3371 * Set the pending device list so we correctly handle device in-use 3372 * checking. 3373 */ 3374 sav->sav_pending = dev; 3375 sav->sav_npending = ndev; 3376 3377 for (i = 0; i < ndev; i++) { 3378 if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0, 3379 mode)) != 0) 3380 goto out; 3381 3382 if (!vd->vdev_ops->vdev_op_leaf) { 3383 vdev_free(vd); 3384 error = SET_ERROR(EINVAL); 3385 goto out; 3386 } 3387 3388 /* 3389 * The L2ARC currently only supports disk devices in 3390 * kernel context. For user-level testing, we allow it. 
3391 */ 3392#ifdef _KERNEL 3393 if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) && 3394 strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) { 3395 error = SET_ERROR(ENOTBLK); 3396 vdev_free(vd); 3397 goto out; 3398 } 3399#endif 3400 vd->vdev_top = vd; 3401 3402 if ((error = vdev_open(vd)) == 0 && 3403 (error = vdev_label_init(vd, crtxg, label)) == 0) { 3404 VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID, 3405 vd->vdev_guid) == 0); 3406 } 3407 3408 vdev_free(vd); 3409 3410 if (error && 3411 (mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE)) 3412 goto out; 3413 else 3414 error = 0; 3415 } 3416 3417out: 3418 sav->sav_pending = NULL; 3419 sav->sav_npending = 0; 3420 return (error); 3421} 3422 3423static int 3424spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode) 3425{ 3426 int error; 3427 3428 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 3429 3430 if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3431 &spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES, 3432 VDEV_LABEL_SPARE)) != 0) { 3433 return (error); 3434 } 3435 3436 return (spa_validate_aux_devs(spa, nvroot, crtxg, mode, 3437 &spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE, 3438 VDEV_LABEL_L2CACHE)); 3439} 3440 3441static void 3442spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs, 3443 const char *config) 3444{ 3445 int i; 3446 3447 if (sav->sav_config != NULL) { 3448 nvlist_t **olddevs; 3449 uint_t oldndevs; 3450 nvlist_t **newdevs; 3451 3452 /* 3453 * Generate new dev list by concatenating with the 3454 * current dev list. 3455 */ 3456 VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config, 3457 &olddevs, &oldndevs) == 0); 3458 3459 newdevs = kmem_alloc(sizeof (void *) * 3460 (ndevs + oldndevs), KM_SLEEP); 3461 for (i = 0; i < oldndevs; i++) 3462 VERIFY(nvlist_dup(olddevs[i], &newdevs[i], 3463 KM_SLEEP) == 0); 3464 for (i = 0; i < ndevs; i++) 3465 VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs], 3466 KM_SLEEP) == 0); 3467 3468 VERIFY(nvlist_remove(sav->sav_config, config, 3469 DATA_TYPE_NVLIST_ARRAY) == 0); 3470 3471 VERIFY(nvlist_add_nvlist_array(sav->sav_config, 3472 config, newdevs, ndevs + oldndevs) == 0); 3473 for (i = 0; i < oldndevs + ndevs; i++) 3474 nvlist_free(newdevs[i]); 3475 kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *)); 3476 } else { 3477 /* 3478 * Generate a new dev list. 3479 */ 3480 VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME, 3481 KM_SLEEP) == 0); 3482 VERIFY(nvlist_add_nvlist_array(sav->sav_config, config, 3483 devs, ndevs) == 0); 3484 } 3485} 3486 3487/* 3488 * Stop and drop level 2 ARC devices 3489 */ 3490void 3491spa_l2cache_drop(spa_t *spa) 3492{ 3493 vdev_t *vd; 3494 int i; 3495 spa_aux_vdev_t *sav = &spa->spa_l2cache; 3496 3497 for (i = 0; i < sav->sav_count; i++) { 3498 uint64_t pool; 3499 3500 vd = sav->sav_vdevs[i]; 3501 ASSERT(vd != NULL); 3502 3503 if (spa_l2cache_exists(vd->vdev_guid, &pool) && 3504 pool != 0ULL && l2arc_vdev_present(vd)) 3505 l2arc_remove_vdev(vd); 3506 } 3507} 3508 3509/* 3510 * Pool Creation 3511 */ 3512int 3513spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props, 3514 nvlist_t *zplprops) 3515{ 3516 spa_t *spa; 3517 char *altroot = NULL; 3518 vdev_t *rvd; 3519 dsl_pool_t *dp; 3520 dmu_tx_t *tx; 3521 int error = 0; 3522 uint64_t txg = TXG_INITIAL; 3523 nvlist_t **spares, **l2cache; 3524 uint_t nspares, nl2cache; 3525 uint64_t version, obj; 3526 boolean_t has_features; 3527 3528 /* 3529 * If this pool already exists, return failure. 
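 *
 * Illustrative sketch (not from the original source; the device path
 * and pool name are hypothetical): the simplest nvroot a caller can
 * pass in describes a root vdev with a single disk child,
 *
 *	nvlist_t *disk, *root;
 *
 *	VERIFY(nvlist_alloc(&disk, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_DISK) == 0);
 *	VERIFY(nvlist_add_string(disk, ZPOOL_CONFIG_PATH,
 *	    "/dev/da0") == 0);
 *	VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, KM_SLEEP) == 0);
 *	VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE,
 *	    VDEV_TYPE_ROOT) == 0);
 *	VERIFY(nvlist_add_nvlist_array(root, ZPOOL_CONFIG_CHILDREN,
 *	    &disk, 1) == 0);
 *	error = spa_create("tank", root, NULL, NULL);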
3530 */ 3531 mutex_enter(&spa_namespace_lock); 3532 if (spa_lookup(pool) != NULL) { 3533 mutex_exit(&spa_namespace_lock); 3534 return (SET_ERROR(EEXIST)); 3535 } 3536 3537 /* 3538 * Allocate a new spa_t structure. 3539 */ 3540 (void) nvlist_lookup_string(props, 3541 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 3542 spa = spa_add(pool, NULL, altroot); 3543 spa_activate(spa, spa_mode_global); 3544 3545 if (props && (error = spa_prop_validate(spa, props))) { 3546 spa_deactivate(spa); 3547 spa_remove(spa); 3548 mutex_exit(&spa_namespace_lock); 3549 return (error); 3550 } 3551 3552 has_features = B_FALSE; 3553 for (nvpair_t *elem = nvlist_next_nvpair(props, NULL); 3554 elem != NULL; elem = nvlist_next_nvpair(props, elem)) { 3555 if (zpool_prop_feature(nvpair_name(elem))) 3556 has_features = B_TRUE; 3557 } 3558 3559 if (has_features || nvlist_lookup_uint64(props, 3560 zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) { 3561 version = SPA_VERSION; 3562 } 3563 ASSERT(SPA_VERSION_IS_SUPPORTED(version)); 3564 3565 spa->spa_first_txg = txg; 3566 spa->spa_uberblock.ub_txg = txg - 1; 3567 spa->spa_uberblock.ub_version = version; 3568 spa->spa_ubsync = spa->spa_uberblock; 3569 3570 /* 3571 * Create "The Godfather" zio to hold all async IOs 3572 */ 3573 spa->spa_async_zio_root = zio_root(spa, NULL, NULL, 3574 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_GODFATHER); 3575 3576 /* 3577 * Create the root vdev. 3578 */ 3579 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3580 3581 error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD); 3582 3583 ASSERT(error != 0 || rvd != NULL); 3584 ASSERT(error != 0 || spa->spa_root_vdev == rvd); 3585 3586 if (error == 0 && !zfs_allocatable_devs(nvroot)) 3587 error = SET_ERROR(EINVAL); 3588 3589 if (error == 0 && 3590 (error = vdev_create(rvd, txg, B_FALSE)) == 0 && 3591 (error = spa_validate_aux(spa, nvroot, txg, 3592 VDEV_ALLOC_ADD)) == 0) { 3593 for (int c = 0; c < rvd->vdev_children; c++) { 3594 vdev_ashift_optimize(rvd->vdev_child[c]); 3595 vdev_metaslab_set_size(rvd->vdev_child[c]); 3596 vdev_expand(rvd->vdev_child[c], txg); 3597 } 3598 } 3599 3600 spa_config_exit(spa, SCL_ALL, FTAG); 3601 3602 if (error != 0) { 3603 spa_unload(spa); 3604 spa_deactivate(spa); 3605 spa_remove(spa); 3606 mutex_exit(&spa_namespace_lock); 3607 return (error); 3608 } 3609 3610 /* 3611 * Get the list of spares, if specified. 3612 */ 3613 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 3614 &spares, &nspares) == 0) { 3615 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME, 3616 KM_SLEEP) == 0); 3617 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 3618 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 3619 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3620 spa_load_spares(spa); 3621 spa_config_exit(spa, SCL_ALL, FTAG); 3622 spa->spa_spares.sav_sync = B_TRUE; 3623 } 3624 3625 /* 3626 * Get the list of level 2 cache devices, if specified. 
3627 */ 3628 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 3629 &l2cache, &nl2cache) == 0) { 3630 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 3631 NV_UNIQUE_NAME, KM_SLEEP) == 0); 3632 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 3633 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 3634 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3635 spa_load_l2cache(spa); 3636 spa_config_exit(spa, SCL_ALL, FTAG); 3637 spa->spa_l2cache.sav_sync = B_TRUE; 3638 } 3639 3640 spa->spa_is_initializing = B_TRUE; 3641 spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, txg); 3642 spa->spa_meta_objset = dp->dp_meta_objset; 3643 spa->spa_is_initializing = B_FALSE; 3644 3645 /* 3646 * Create DDTs (dedup tables). 3647 */ 3648 ddt_create(spa); 3649 3650 spa_update_dspace(spa); 3651 3652 tx = dmu_tx_create_assigned(dp, txg); 3653 3654 /* 3655 * Create the pool config object. 3656 */ 3657 spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset, 3658 DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE, 3659 DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx); 3660 3661 if (zap_add(spa->spa_meta_objset, 3662 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG, 3663 sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) { 3664 cmn_err(CE_PANIC, "failed to add pool config"); 3665 } 3666 3667 if (spa_version(spa) >= SPA_VERSION_FEATURES) 3668 spa_feature_create_zap_objects(spa, tx); 3669 3670 if (zap_add(spa->spa_meta_objset, 3671 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION, 3672 sizeof (uint64_t), 1, &version, tx) != 0) { 3673 cmn_err(CE_PANIC, "failed to add pool version"); 3674 } 3675 3676 /* Newly created pools with the right version are always deflated. */ 3677 if (version >= SPA_VERSION_RAIDZ_DEFLATE) { 3678 spa->spa_deflate = TRUE; 3679 if (zap_add(spa->spa_meta_objset, 3680 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE, 3681 sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) { 3682 cmn_err(CE_PANIC, "failed to add deflate"); 3683 } 3684 } 3685 3686 /* 3687 * Create the deferred-free bpobj. Turn off compression 3688 * because sync-to-convergence takes longer if the blocksize 3689 * keeps changing. 3690 */ 3691 obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx); 3692 dmu_object_set_compress(spa->spa_meta_objset, obj, 3693 ZIO_COMPRESS_OFF, tx); 3694 if (zap_add(spa->spa_meta_objset, 3695 DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ, 3696 sizeof (uint64_t), 1, &obj, tx) != 0) { 3697 cmn_err(CE_PANIC, "failed to add bpobj"); 3698 } 3699 VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj, 3700 spa->spa_meta_objset, obj)); 3701 3702 /* 3703 * Create the pool's history object. 3704 */ 3705 if (version >= SPA_VERSION_ZPOOL_HISTORY) 3706 spa_history_create_obj(spa, tx); 3707 3708 /* 3709 * Set pool properties. 3710 */ 3711 spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS); 3712 spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION); 3713 spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE); 3714 spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND); 3715 3716 if (props != NULL) { 3717 spa_configfile_set(spa, props, B_FALSE); 3718 spa_sync_props(props, tx); 3719 } 3720 3721 dmu_tx_commit(tx); 3722 3723 spa->spa_sync_on = B_TRUE; 3724 txg_sync_start(spa->spa_dsl_pool); 3725 3726 /* 3727 * We explicitly wait for the first transaction to complete so that our 3728 * bean counters are appropriately updated. 
3729 */ 3730 txg_wait_synced(spa->spa_dsl_pool, txg); 3731 3732 spa_config_sync(spa, B_FALSE, B_TRUE); 3733 3734 spa_history_log_version(spa, "create"); 3735 3736 spa->spa_minref = refcount_count(&spa->spa_refcount); 3737 3738 mutex_exit(&spa_namespace_lock); 3739 3740 return (0); 3741} 3742 3743#ifdef _KERNEL 3744#if defined(sun) 3745/* 3746 * Get the root pool information from the root disk, then import the root pool 3747 * at system boot time. 3748 */ 3749extern int vdev_disk_read_rootlabel(char *, char *, nvlist_t **); 3750 3751static nvlist_t * 3752spa_generate_rootconf(char *devpath, char *devid, uint64_t *guid) 3753{ 3754 nvlist_t *config; 3755 nvlist_t *nvtop, *nvroot; 3756 uint64_t pgid; 3757 3758 if (vdev_disk_read_rootlabel(devpath, devid, &config) != 0) 3759 return (NULL); 3760 3761 /* 3762 * Add this top-level vdev to the child array. 3763 */ 3764 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3765 &nvtop) == 0); 3766 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 3767 &pgid) == 0); 3768 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, guid) == 0); 3769 3770 /* 3771 * Put this pool's top-level vdevs into a root vdev. 3772 */ 3773 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 3774 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 3775 VDEV_TYPE_ROOT) == 0); 3776 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 3777 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 3778 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 3779 &nvtop, 1) == 0); 3780 3781 /* 3782 * Replace the existing vdev_tree with the new root vdev in 3783 * this pool's configuration (remove the old, add the new). 3784 */ 3785 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 3786 nvlist_free(nvroot); 3787 return (config); 3788} 3789 3790/* 3791 * Walk the vdev tree and see if we can find a device with "better" 3792 * configuration. A configuration is "better" if the label on that 3793 * device has a more recent txg. 3794 */ 3795static void 3796spa_alt_rootvdev(vdev_t *vd, vdev_t **avd, uint64_t *txg) 3797{ 3798 for (int c = 0; c < vd->vdev_children; c++) 3799 spa_alt_rootvdev(vd->vdev_child[c], avd, txg); 3800 3801 if (vd->vdev_ops->vdev_op_leaf) { 3802 nvlist_t *label; 3803 uint64_t label_txg; 3804 3805 if (vdev_disk_read_rootlabel(vd->vdev_physpath, vd->vdev_devid, 3806 &label) != 0) 3807 return; 3808 3809 VERIFY(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG, 3810 &label_txg) == 0); 3811 3812 /* 3813 * Do we have a better boot device? 3814 */ 3815 if (label_txg > *txg) { 3816 *txg = label_txg; 3817 *avd = vd; 3818 } 3819 nvlist_free(label); 3820 } 3821} 3822 3823/* 3824 * Import a root pool. 3825 * 3826 * For x86, devpath_list will consist of devid and/or physpath name of 3827 * the vdev (e.g. "id1,sd@SSEAGATE..." or "/pci@1f,0/ide@d/disk@0,0:a"). 3828 * The GRUB "findroot" command will return the vdev we should boot. 3829 * 3830 * For SPARC, devpath_list consists of the physpath name of the booting device, 3831 * no matter whether the root pool is a single-device or a mirrored pool. 3832 * e.g. 3833 * "/pci@1f,0/ide@d/disk@0,0:a" 3834 */ 3835int 3836spa_import_rootpool(char *devpath, char *devid) 3837{ 3838 spa_t *spa; 3839 vdev_t *rvd, *bvd, *avd = NULL; 3840 nvlist_t *config, *nvtop; 3841 uint64_t guid, txg; 3842 char *pname; 3843 int error; 3844 3845 /* 3846 * Read the label from the boot device and generate a configuration. 
3847 */ 3848 config = spa_generate_rootconf(devpath, devid, &guid); 3849#if defined(_OBP) && defined(_KERNEL) 3850 if (config == NULL) { 3851 if (strstr(devpath, "/iscsi/ssd") != NULL) { 3852 /* iscsi boot */ 3853 get_iscsi_bootpath_phy(devpath); 3854 config = spa_generate_rootconf(devpath, devid, &guid); 3855 } 3856 } 3857#endif 3858 if (config == NULL) { 3859 cmn_err(CE_NOTE, "Cannot read the pool label from '%s'", 3860 devpath); 3861 return (SET_ERROR(EIO)); 3862 } 3863 3864 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 3865 &pname) == 0); 3866 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) == 0); 3867 3868 mutex_enter(&spa_namespace_lock); 3869 if ((spa = spa_lookup(pname)) != NULL) { 3870 /* 3871 * Remove the existing root pool from the namespace so that we 3872 * can replace it with the correct config we just read in. 3873 */ 3874 spa_remove(spa); 3875 } 3876 3877 spa = spa_add(pname, config, NULL); 3878 spa->spa_is_root = B_TRUE; 3879 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 3880 3881 /* 3882 * Build up a vdev tree based on the boot device's label config. 3883 */ 3884 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 3885 &nvtop) == 0); 3886 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3887 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 3888 VDEV_ALLOC_ROOTPOOL); 3889 spa_config_exit(spa, SCL_ALL, FTAG); 3890 if (error) { 3891 mutex_exit(&spa_namespace_lock); 3892 nvlist_free(config); 3893 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 3894 pname); 3895 return (error); 3896 } 3897 3898 /* 3899 * Get the boot vdev. 3900 */ 3901 if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) { 3902 cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu", 3903 (u_longlong_t)guid); 3904 error = SET_ERROR(ENOENT); 3905 goto out; 3906 } 3907 3908 /* 3909 * Determine if there is a better boot device. 3910 */ 3911 avd = bvd; 3912 spa_alt_rootvdev(rvd, &avd, &txg); 3913 if (avd != bvd) { 3914 cmn_err(CE_NOTE, "The boot device is 'degraded'. Please " 3915 "try booting from '%s'", avd->vdev_path); 3916 error = SET_ERROR(EINVAL); 3917 goto out; 3918 } 3919 3920 /* 3921 * If the boot device is part of a spare vdev then ensure that 3922 * we're booting off the active spare. 3923 */ 3924 if (bvd->vdev_parent->vdev_ops == &vdev_spare_ops && 3925 !bvd->vdev_isspare) { 3926 cmn_err(CE_NOTE, "The boot device is currently spared. 
Please " 3927 "try booting from '%s'", 3928 bvd->vdev_parent-> 3929 vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path); 3930 error = SET_ERROR(EINVAL); 3931 goto out; 3932 } 3933 3934 error = 0; 3935out: 3936 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 3937 vdev_free(rvd); 3938 spa_config_exit(spa, SCL_ALL, FTAG); 3939 mutex_exit(&spa_namespace_lock); 3940 3941 nvlist_free(config); 3942 return (error); 3943} 3944 3945#else 3946 3947extern int vdev_geom_read_pool_label(const char *name, nvlist_t ***configs, 3948 uint64_t *count); 3949 3950static nvlist_t * 3951spa_generate_rootconf(const char *name) 3952{ 3953 nvlist_t **configs, **tops; 3954 nvlist_t *config; 3955 nvlist_t *best_cfg, *nvtop, *nvroot; 3956 uint64_t *holes; 3957 uint64_t best_txg; 3958 uint64_t nchildren; 3959 uint64_t pgid; 3960 uint64_t count; 3961 uint64_t i; 3962 uint_t nholes; 3963 3964 if (vdev_geom_read_pool_label(name, &configs, &count) != 0) 3965 return (NULL); 3966 3967 ASSERT3U(count, !=, 0); 3968 best_txg = 0; 3969 for (i = 0; i < count; i++) { 3970 uint64_t txg; 3971 3972 VERIFY(nvlist_lookup_uint64(configs[i], ZPOOL_CONFIG_POOL_TXG, 3973 &txg) == 0); 3974 if (txg > best_txg) { 3975 best_txg = txg; 3976 best_cfg = configs[i]; 3977 } 3978 } 3979 3980 /* 3981 * Multi-vdev root pool configuration discovery is not supported yet. 3982 */ 3983 nchildren = 1; 3984 nvlist_lookup_uint64(best_cfg, ZPOOL_CONFIG_VDEV_CHILDREN, &nchildren); 3985 holes = NULL; 3986 nvlist_lookup_uint64_array(best_cfg, ZPOOL_CONFIG_HOLE_ARRAY, 3987 &holes, &nholes); 3988 3989 tops = kmem_zalloc(nchildren * sizeof(void *), KM_SLEEP); 3990 for (i = 0; i < nchildren; i++) { 3991 if (i >= count) 3992 break; 3993 if (configs[i] == NULL) 3994 continue; 3995 VERIFY(nvlist_lookup_nvlist(configs[i], ZPOOL_CONFIG_VDEV_TREE, 3996 &nvtop) == 0); 3997 nvlist_dup(nvtop, &tops[i], KM_SLEEP); 3998 } 3999 for (i = 0; holes != NULL && i < nholes; i++) { 4000 if (i >= nchildren) 4001 continue; 4002 if (tops[holes[i]] != NULL) 4003 continue; 4004 nvlist_alloc(&tops[holes[i]], NV_UNIQUE_NAME, KM_SLEEP); 4005 VERIFY(nvlist_add_string(tops[holes[i]], ZPOOL_CONFIG_TYPE, 4006 VDEV_TYPE_HOLE) == 0); 4007 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_ID, 4008 holes[i]) == 0); 4009 VERIFY(nvlist_add_uint64(tops[holes[i]], ZPOOL_CONFIG_GUID, 4010 0) == 0); 4011 } 4012 for (i = 0; i < nchildren; i++) { 4013 if (tops[i] != NULL) 4014 continue; 4015 nvlist_alloc(&tops[i], NV_UNIQUE_NAME, KM_SLEEP); 4016 VERIFY(nvlist_add_string(tops[i], ZPOOL_CONFIG_TYPE, 4017 VDEV_TYPE_MISSING) == 0); 4018 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_ID, 4019 i) == 0); 4020 VERIFY(nvlist_add_uint64(tops[i], ZPOOL_CONFIG_GUID, 4021 0) == 0); 4022 } 4023 4024 /* 4025 * Create pool config based on the best vdev config. 4026 */ 4027 nvlist_dup(best_cfg, &config, KM_SLEEP); 4028 4029 /* 4030 * Put this pool's top-level vdevs into a root vdev. 4031 */ 4032 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, 4033 &pgid) == 0); 4034 VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0); 4035 VERIFY(nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, 4036 VDEV_TYPE_ROOT) == 0); 4037 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) == 0); 4038 VERIFY(nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, pgid) == 0); 4039 VERIFY(nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 4040 tops, nchildren) == 0); 4041 4042 /* 4043 * Replace the existing vdev_tree with the new root vdev in 4044 * this pool's configuration (remove the old, add the new). 
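 *
 * Illustrative layout (informal; values elided): the config returned
 * below carries the pool-level metadata of the best label plus the
 * synthesized tree, roughly
 *
 *	name=... pool_guid=... txg=...
 *	vdev_tree: type="root", id=0, guid=<pool guid>
 *	    children[0..n]: the discovered disks, with "hole" and
 *	    "missing" placeholders filled in as above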
4045 */ 4046 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0); 4047 4048 /* 4049 * Drop vdev config elements that should not be present at pool level. 4050 */ 4051 nvlist_remove(config, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64); 4052 nvlist_remove(config, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64); 4053 4054 for (i = 0; i < count; i++) 4055 nvlist_free(configs[i]); 4056 kmem_free(configs, count * sizeof(void *)); 4057 for (i = 0; i < nchildren; i++) 4058 nvlist_free(tops[i]); 4059 kmem_free(tops, nchildren * sizeof(void *)); 4060 nvlist_free(nvroot); 4061 return (config); 4062} 4063 4064int 4065spa_import_rootpool(const char *name) 4066{ 4067 spa_t *spa; 4068 vdev_t *rvd, *bvd, *avd = NULL; 4069 nvlist_t *config, *nvtop; 4070 uint64_t txg; 4071 char *pname; 4072 int error; 4073 4074 /* 4075 * Read the label from the boot device and generate a configuration. 4076 */ 4077 config = spa_generate_rootconf(name); 4078 4079 mutex_enter(&spa_namespace_lock); 4080 if (config != NULL) { 4081 VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, 4082 &pname) == 0 && strcmp(name, pname) == 0); 4083 VERIFY(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg) 4084 == 0); 4085 4086 if ((spa = spa_lookup(pname)) != NULL) { 4087 /* 4088 * Remove the existing root pool from the namespace so 4089 * that we can replace it with the correct config 4090 * we just read in. 4091 */ 4092 spa_remove(spa); 4093 } 4094 spa = spa_add(pname, config, NULL); 4095 4096 /* 4097 * Set spa_ubsync.ub_version as it can be used in vdev_alloc() 4098 * via spa_version(). 4099 */ 4100 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, 4101 &spa->spa_ubsync.ub_version) != 0) 4102 spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL; 4103 } else if ((spa = spa_lookup(name)) == NULL) { mutex_exit(&spa_namespace_lock); 4104 cmn_err(CE_NOTE, "Cannot find the pool label for '%s'", 4105 name); 4106 return (EIO); 4107 } else { 4108 VERIFY(nvlist_dup(spa->spa_config, &config, KM_SLEEP) == 0); 4109 } 4110 spa->spa_is_root = B_TRUE; 4111 spa->spa_import_flags = ZFS_IMPORT_VERBATIM; 4112 4113 /* 4114 * Build up a vdev tree based on the boot device's label config. 4115 */ 4116 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4117 &nvtop) == 0); 4118 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4119 error = spa_config_parse(spa, &rvd, nvtop, NULL, 0, 4120 VDEV_ALLOC_ROOTPOOL); 4121 spa_config_exit(spa, SCL_ALL, FTAG); 4122 if (error) { 4123 mutex_exit(&spa_namespace_lock); 4124 nvlist_free(config); 4125 cmn_err(CE_NOTE, "Can not parse the config for pool '%s'", 4126 pname); 4127 return (error); 4128 } 4129 4130 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4131 vdev_free(rvd); 4132 spa_config_exit(spa, SCL_ALL, FTAG); 4133 mutex_exit(&spa_namespace_lock); 4134 4135 nvlist_free(config); 4136 return (0); 4137} 4138 4139#endif /* sun */ 4140#endif 4141 4142/* 4143 * Import a non-root pool into the system. 4144 */ 4145int 4146spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags) 4147{ 4148 spa_t *spa; 4149 char *altroot = NULL; 4150 spa_load_state_t state = SPA_LOAD_IMPORT; 4151 zpool_rewind_policy_t policy; 4152 uint64_t mode = spa_mode_global; 4153 uint64_t readonly = B_FALSE; 4154 int error; 4155 nvlist_t *nvroot; 4156 nvlist_t **spares, **l2cache; 4157 uint_t nspares, nl2cache; 4158 4159 /* 4160 * If a pool with this name exists, return failure. 
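 *
 * A minimal sketch of the expected call, assuming the usual ioctl
 * entry point (illustrative only):
 *
 *	error = spa_import(zc->zc_name, config, props, zc->zc_cookie);
 *
 * where 'config' is the pool config assembled by userland and 'flags'
 * carries ZFS_IMPORT_* bits (e.g. ZFS_IMPORT_VERBATIM, handled below).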
4161 */ 4162 mutex_enter(&spa_namespace_lock); 4163 if (spa_lookup(pool) != NULL) { 4164 mutex_exit(&spa_namespace_lock); 4165 return (SET_ERROR(EEXIST)); 4166 } 4167 4168 /* 4169 * Create and initialize the spa structure. 4170 */ 4171 (void) nvlist_lookup_string(props, 4172 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 4173 (void) nvlist_lookup_uint64(props, 4174 zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly); 4175 if (readonly) 4176 mode = FREAD; 4177 spa = spa_add(pool, config, altroot); 4178 spa->spa_import_flags = flags; 4179 4180 /* 4181 * Verbatim import - Take a pool and insert it into the namespace 4182 * as if it had been loaded at boot. 4183 */ 4184 if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) { 4185 if (props != NULL) 4186 spa_configfile_set(spa, props, B_FALSE); 4187 4188 spa_config_sync(spa, B_FALSE, B_TRUE); 4189 4190 mutex_exit(&spa_namespace_lock); 4191 return (0); 4192 } 4193 4194 spa_activate(spa, mode); 4195 4196 /* 4197 * Don't start async tasks until we know everything is healthy. 4198 */ 4199 spa_async_suspend(spa); 4200 4201 zpool_get_rewind_policy(config, &policy); 4202 if (policy.zrp_request & ZPOOL_DO_REWIND) 4203 state = SPA_LOAD_RECOVER; 4204 4205 /* 4206 * Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig 4207 * because the user-supplied config is actually the one to trust when 4208 * doing an import. 4209 */ 4210 if (state != SPA_LOAD_RECOVER) 4211 spa->spa_last_ubsync_txg = spa->spa_load_txg = 0; 4212 4213 error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg, 4214 policy.zrp_request); 4215 4216 /* 4217 * Propagate anything learned while loading the pool and pass it 4218 * back to caller (i.e. rewind info, missing devices, etc). 4219 */ 4220 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4221 spa->spa_load_info) == 0); 4222 4223 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4224 /* 4225 * Toss any existing sparelist, as it doesn't have any validity 4226 * anymore, and conflicts with spa_has_spare(). 4227 */ 4228 if (spa->spa_spares.sav_config) { 4229 nvlist_free(spa->spa_spares.sav_config); 4230 spa->spa_spares.sav_config = NULL; 4231 spa_load_spares(spa); 4232 } 4233 if (spa->spa_l2cache.sav_config) { 4234 nvlist_free(spa->spa_l2cache.sav_config); 4235 spa->spa_l2cache.sav_config = NULL; 4236 spa_load_l2cache(spa); 4237 } 4238 4239 VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, 4240 &nvroot) == 0); 4241 if (error == 0) 4242 error = spa_validate_aux(spa, nvroot, -1ULL, 4243 VDEV_ALLOC_SPARE); 4244 if (error == 0) 4245 error = spa_validate_aux(spa, nvroot, -1ULL, 4246 VDEV_ALLOC_L2CACHE); 4247 spa_config_exit(spa, SCL_ALL, FTAG); 4248 4249 if (props != NULL) 4250 spa_configfile_set(spa, props, B_FALSE); 4251 4252 if (error != 0 || (props && spa_writeable(spa) && 4253 (error = spa_prop_set(spa, props)))) { 4254 spa_unload(spa); 4255 spa_deactivate(spa); 4256 spa_remove(spa); 4257 mutex_exit(&spa_namespace_lock); 4258 return (error); 4259 } 4260 4261 spa_async_resume(spa); 4262 4263 /* 4264 * Override any spares and level 2 cache devices as specified by 4265 * the user, as these may have correct device names/devids, etc. 
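 *
 * This is done by swapping the ZPOOL_CONFIG_SPARES and
 * ZPOOL_CONFIG_L2CACHE arrays from the caller's vdev tree into
 * sav_config and reloading the in-core aux vdevs from the result.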
4266 */ 4267 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, 4268 &spares, &nspares) == 0) { 4269 if (spa->spa_spares.sav_config) 4270 VERIFY(nvlist_remove(spa->spa_spares.sav_config, 4271 ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0); 4272 else 4273 VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, 4274 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4275 VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config, 4276 ZPOOL_CONFIG_SPARES, spares, nspares) == 0); 4277 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4278 spa_load_spares(spa); 4279 spa_config_exit(spa, SCL_ALL, FTAG); 4280 spa->spa_spares.sav_sync = B_TRUE; 4281 } 4282 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, 4283 &l2cache, &nl2cache) == 0) { 4284 if (spa->spa_l2cache.sav_config) 4285 VERIFY(nvlist_remove(spa->spa_l2cache.sav_config, 4286 ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0); 4287 else 4288 VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config, 4289 NV_UNIQUE_NAME, KM_SLEEP) == 0); 4290 VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config, 4291 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0); 4292 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4293 spa_load_l2cache(spa); 4294 spa_config_exit(spa, SCL_ALL, FTAG); 4295 spa->spa_l2cache.sav_sync = B_TRUE; 4296 } 4297 4298 /* 4299 * Check for any removed devices. 4300 */ 4301 if (spa->spa_autoreplace) { 4302 spa_aux_check_removed(&spa->spa_spares); 4303 spa_aux_check_removed(&spa->spa_l2cache); 4304 } 4305 4306 if (spa_writeable(spa)) { 4307 /* 4308 * Update the config cache to include the newly-imported pool. 4309 */ 4310 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4311 } 4312 4313 /* 4314 * It's possible that the pool was expanded while it was exported. 4315 * We kick off an async task to handle this for us. 4316 */ 4317 spa_async_request(spa, SPA_ASYNC_AUTOEXPAND); 4318 4319 mutex_exit(&spa_namespace_lock); 4320 spa_history_log_version(spa, "import"); 4321 4322#ifdef __FreeBSD__ 4323#ifdef _KERNEL 4324 zvol_create_minors(pool); 4325#endif 4326#endif 4327 return (0); 4328} 4329 4330nvlist_t * 4331spa_tryimport(nvlist_t *tryconfig) 4332{ 4333 nvlist_t *config = NULL; 4334 char *poolname; 4335 spa_t *spa; 4336 uint64_t state; 4337 int error; 4338 4339 if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname)) 4340 return (NULL); 4341 4342 if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state)) 4343 return (NULL); 4344 4345 /* 4346 * Create and initialize the spa structure. 4347 */ 4348 mutex_enter(&spa_namespace_lock); 4349 spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL); 4350 spa_activate(spa, FREAD); 4351 4352 /* 4353 * Pass off the heavy lifting to spa_load(). 4354 * Pass TRUE for mosconfig because the user-supplied config 4355 * is actually the one to trust when doing an import. 4356 */ 4357 error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE); 4358 4359 /* 4360 * If 'tryconfig' was at least parsable, return the current config. 
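 *
 * Callers treat a NULL return as "nothing importable"; a typical
 * consumer (sketch only) looks like:
 *
 *	if ((config = spa_tryimport(tryconfig)) != NULL) {
 *		(void) nvlist_lookup_nvlist(config,
 *		    ZPOOL_CONFIG_LOAD_INFO, &info);
 *		nvlist_free(config);
 *	}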
4361 */ 4362 if (spa->spa_root_vdev != NULL) { 4363 config = spa_config_generate(spa, NULL, -1ULL, B_TRUE); 4364 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, 4365 poolname) == 0); 4366 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 4367 state) == 0); 4368 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP, 4369 spa->spa_uberblock.ub_timestamp) == 0); 4370 VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, 4371 spa->spa_load_info) == 0); 4372 4373 /* 4374 * If the bootfs property exists on this pool then we 4375 * copy it out so that external consumers can tell which 4376 * pools are bootable. 4377 */ 4378 if ((!error || error == EEXIST) && spa->spa_bootfs) { 4379 char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4380 4381 /* 4382 * We have to play games with the name since the 4383 * pool was opened as TRYIMPORT_NAME. 4384 */ 4385 if (dsl_dsobj_to_dsname(spa_name(spa), 4386 spa->spa_bootfs, tmpname) == 0) { 4387 char *cp; 4388 char *dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP); 4389 4390 cp = strchr(tmpname, '/'); 4391 if (cp == NULL) { 4392 (void) strlcpy(dsname, tmpname, 4393 MAXPATHLEN); 4394 } else { 4395 (void) snprintf(dsname, MAXPATHLEN, 4396 "%s/%s", poolname, ++cp); 4397 } 4398 VERIFY(nvlist_add_string(config, 4399 ZPOOL_CONFIG_BOOTFS, dsname) == 0); 4400 kmem_free(dsname, MAXPATHLEN); 4401 } 4402 kmem_free(tmpname, MAXPATHLEN); 4403 } 4404 4405 /* 4406 * Add the list of hot spares and level 2 cache devices. 4407 */ 4408 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 4409 spa_add_spares(spa, config); 4410 spa_add_l2cache(spa, config); 4411 spa_config_exit(spa, SCL_CONFIG, FTAG); 4412 } 4413 4414 spa_unload(spa); 4415 spa_deactivate(spa); 4416 spa_remove(spa); 4417 mutex_exit(&spa_namespace_lock); 4418 4419 return (config); 4420} 4421 4422/* 4423 * Pool export/destroy 4424 * 4425 * The act of destroying or exporting a pool is very simple. We make sure there 4426 * is no more pending I/O and any references to the pool are gone. Then, we 4427 * update the pool state and sync all the labels to disk, removing the 4428 * configuration from the cache afterwards. If the 'hardforce' flag is set, then 4429 * we don't sync the labels or remove the configuration cache. 4430 */ 4431static int 4432spa_export_common(char *pool, int new_state, nvlist_t **oldconfig, 4433 boolean_t force, boolean_t hardforce) 4434{ 4435 spa_t *spa; 4436 4437 if (oldconfig) 4438 *oldconfig = NULL; 4439 4440 if (!(spa_mode_global & FWRITE)) 4441 return (SET_ERROR(EROFS)); 4442 4443 mutex_enter(&spa_namespace_lock); 4444 if ((spa = spa_lookup(pool)) == NULL) { 4445 mutex_exit(&spa_namespace_lock); 4446 return (SET_ERROR(ENOENT)); 4447 } 4448 4449 /* 4450 * Put a hold on the pool, drop the namespace lock, stop async tasks, 4451 * reacquire the namespace lock, and see if we can export. 4452 */ 4453 spa_open_ref(spa, FTAG); 4454 mutex_exit(&spa_namespace_lock); 4455 spa_async_suspend(spa); 4456 mutex_enter(&spa_namespace_lock); 4457 spa_close(spa, FTAG); 4458 4459 /* 4460 * The pool will be in core if it's openable, 4461 * in which case we can modify its state. 4462 */ 4463 if (spa->spa_state != POOL_STATE_UNINITIALIZED && spa->spa_sync_on) { 4464 /* 4465 * Objsets may be open only because they're dirty, so we 4466 * have to force it to sync before checking spa_refcnt. 4467 */ 4468 txg_wait_synced(spa->spa_dsl_pool, 0); 4469 4470 /* 4471 * A pool cannot be exported or destroyed if there are active 4472 * references. 
If we are resetting a pool, allow references by 4473 * fault injection handlers. 4474 */ 4475 if (!spa_refcount_zero(spa) || 4476 (spa->spa_inject_ref != 0 && 4477 new_state != POOL_STATE_UNINITIALIZED)) { 4478 spa_async_resume(spa); 4479 mutex_exit(&spa_namespace_lock); 4480 return (SET_ERROR(EBUSY)); 4481 } 4482 4483 /* 4484 * A pool cannot be exported if it has an active shared spare. 4485 * This is to prevent other pools stealing the active spare 4486 * from an exported pool. The pool can still be forcibly 4487 * exported at the user's explicit request. 4488 */ 4489 if (!force && new_state == POOL_STATE_EXPORTED && 4490 spa_has_active_shared_spare(spa)) { 4491 spa_async_resume(spa); 4492 mutex_exit(&spa_namespace_lock); 4493 return (SET_ERROR(EXDEV)); 4494 } 4495 4496 /* 4497 * We want this to be reflected on every label, 4498 * so mark them all dirty. spa_unload() will do the 4499 * final sync that pushes these changes out. 4500 */ 4501 if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) { 4502 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 4503 spa->spa_state = new_state; 4504 spa->spa_final_txg = spa_last_synced_txg(spa) + 4505 TXG_DEFER_SIZE + 1; 4506 vdev_config_dirty(spa->spa_root_vdev); 4507 spa_config_exit(spa, SCL_ALL, FTAG); 4508 } 4509 } 4510 4511 spa_event_notify(spa, NULL, ESC_ZFS_POOL_DESTROY); 4512 4513 if (spa->spa_state != POOL_STATE_UNINITIALIZED) { 4514 spa_unload(spa); 4515 spa_deactivate(spa); 4516 } 4517 4518 if (oldconfig && spa->spa_config) 4519 VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0); 4520 4521 if (new_state != POOL_STATE_UNINITIALIZED) { 4522 if (!hardforce) 4523 spa_config_sync(spa, B_TRUE, B_TRUE); 4524 spa_remove(spa); 4525 } 4526 mutex_exit(&spa_namespace_lock); 4527 4528 return (0); 4529} 4530 4531/* 4532 * Destroy a storage pool. 4533 */ 4534int 4535spa_destroy(char *pool) 4536{ 4537 return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL, 4538 B_FALSE, B_FALSE)); 4539} 4540 4541/* 4542 * Export a storage pool. 4543 */ 4544int 4545spa_export(char *pool, nvlist_t **oldconfig, boolean_t force, 4546 boolean_t hardforce) 4547{ 4548 return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig, 4549 force, hardforce)); 4550} 4551 4552/* 4553 * Similar to spa_export(), this unloads the spa_t without actually removing it 4554 * from the namespace in any way. 4555 */ 4556int 4557spa_reset(char *pool) 4558{ 4559 return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL, 4560 B_FALSE, B_FALSE)); 4561} 4562 4563/* 4564 * ========================================================================== 4565 * Device manipulation 4566 * ========================================================================== 4567 */ 4568 4569/* 4570 * Add a device to a storage pool. 
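 *
 * 'nvroot' is expected to be a root-type vdev tree whose
 * ZPOOL_CONFIG_CHILDREN array names the new top-level vdevs, with
 * optional ZPOOL_CONFIG_SPARES and ZPOOL_CONFIG_L2CACHE arrays
 * alongside it, e.g. (sketch only):
 *
 *	root
 *	    children[0]: type=disk path=/dev/da1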
4571 */ 4572int 4573spa_vdev_add(spa_t *spa, nvlist_t *nvroot) 4574{ 4575 uint64_t txg, id; 4576 int error; 4577 vdev_t *rvd = spa->spa_root_vdev; 4578 vdev_t *vd, *tvd; 4579 nvlist_t **spares, **l2cache; 4580 uint_t nspares, nl2cache; 4581 4582 ASSERT(spa_writeable(spa)); 4583 4584 txg = spa_vdev_enter(spa); 4585 4586 if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0, 4587 VDEV_ALLOC_ADD)) != 0) 4588 return (spa_vdev_exit(spa, NULL, txg, error)); 4589 4590 spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */ 4591 4592 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, 4593 &nspares) != 0) 4594 nspares = 0; 4595 4596 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, 4597 &nl2cache) != 0) 4598 nl2cache = 0; 4599 4600 if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0) 4601 return (spa_vdev_exit(spa, vd, txg, EINVAL)); 4602 4603 if (vd->vdev_children != 0 && 4604 (error = vdev_create(vd, txg, B_FALSE)) != 0) 4605 return (spa_vdev_exit(spa, vd, txg, error)); 4606 4607 /* 4608 * We must validate the spares and l2cache devices after checking the 4609 * children. Otherwise, vdev_inuse() will blindly overwrite the spare. 4610 */ 4611 if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0) 4612 return (spa_vdev_exit(spa, vd, txg, error)); 4613 4614 /* 4615 * Transfer each new top-level vdev from vd to rvd. 4616 */ 4617 for (int c = 0; c < vd->vdev_children; c++) { 4618 4619 /* 4620 * Set the vdev id to the first hole, if one exists. 4621 */ 4622 for (id = 0; id < rvd->vdev_children; id++) { 4623 if (rvd->vdev_child[id]->vdev_ishole) { 4624 vdev_free(rvd->vdev_child[id]); 4625 break; 4626 } 4627 } 4628 tvd = vd->vdev_child[c]; 4629 vdev_remove_child(vd, tvd); 4630 tvd->vdev_id = id; 4631 vdev_add_child(rvd, tvd); 4632 vdev_config_dirty(tvd); 4633 } 4634 4635 if (nspares != 0) { 4636 spa_set_aux_vdevs(&spa->spa_spares, spares, nspares, 4637 ZPOOL_CONFIG_SPARES); 4638 spa_load_spares(spa); 4639 spa->spa_spares.sav_sync = B_TRUE; 4640 } 4641 4642 if (nl2cache != 0) { 4643 spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache, 4644 ZPOOL_CONFIG_L2CACHE); 4645 spa_load_l2cache(spa); 4646 spa->spa_l2cache.sav_sync = B_TRUE; 4647 } 4648 4649 /* 4650 * We have to be careful when adding new vdevs to an existing pool. 4651 * If other threads start allocating from these vdevs before we 4652 * sync the config cache, and we lose power, then upon reboot we may 4653 * fail to open the pool because there are DVAs that the config cache 4654 * can't translate. Therefore, we first add the vdevs without 4655 * initializing metaslabs; sync the config cache (via spa_vdev_exit()); 4656 * and then let spa_config_update() initialize the new metaslabs. 4657 * 4658 * spa_load() checks for added-but-not-initialized vdevs, so that 4659 * if we lose power at any point in this sequence, the remaining 4660 * steps will be completed the next time we load the pool. 4661 */ 4662 (void) spa_vdev_exit(spa, vd, txg, 0); 4663 4664 mutex_enter(&spa_namespace_lock); 4665 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 4666 mutex_exit(&spa_namespace_lock); 4667 4668 return (0); 4669} 4670 4671/* 4672 * Attach a device to a mirror. The arguments are the path to any device 4673 * in the mirror, and the nvroot for the new device. If the path specifies 4674 * a device that is not mirrored, we automatically insert the mirror vdev. 
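 * For example, in the M()/R() notation used by spa_vdev_detach()
 * below, attaching B to a plain disk A yields M(A,B), and attaching B
 * to an existing mirror M(A,C) yields M(A,C,B).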
4675 * 4676 * If 'replacing' is specified, the new device is intended to replace the 4677 * existing device; in this case the two devices are made into their own 4678 * mirror using the 'replacing' vdev, which is functionally identical to 4679 * the mirror vdev (it actually reuses all the same ops) but has a few 4680 * extra rules: you can't attach to it after it's been created, and upon 4681 * completion of resilvering, the first disk (the one being replaced) 4682 * is automatically detached. 4683 */ 4684int 4685spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing) 4686{ 4687 uint64_t txg, dtl_max_txg; 4688 vdev_t *rvd = spa->spa_root_vdev; 4689 vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd; 4690 vdev_ops_t *pvops; 4691 char *oldvdpath, *newvdpath; 4692 int newvd_isspare; 4693 int error; 4694 4695 ASSERT(spa_writeable(spa)); 4696 4697 txg = spa_vdev_enter(spa); 4698 4699 oldvd = spa_lookup_by_guid(spa, guid, B_FALSE); 4700 4701 if (oldvd == NULL) 4702 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4703 4704 if (!oldvd->vdev_ops->vdev_op_leaf) 4705 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4706 4707 pvd = oldvd->vdev_parent; 4708 4709 if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0, 4710 VDEV_ALLOC_ATTACH)) != 0) 4711 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 4712 4713 if (newrootvd->vdev_children != 1) 4714 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4715 4716 newvd = newrootvd->vdev_child[0]; 4717 4718 if (!newvd->vdev_ops->vdev_op_leaf) 4719 return (spa_vdev_exit(spa, newrootvd, txg, EINVAL)); 4720 4721 if ((error = vdev_create(newrootvd, txg, replacing)) != 0) 4722 return (spa_vdev_exit(spa, newrootvd, txg, error)); 4723 4724 /* 4725 * Spares can't replace logs 4726 */ 4727 if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare) 4728 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4729 4730 if (!replacing) { 4731 /* 4732 * For attach, the only allowable parent is a mirror or the root 4733 * vdev. 4734 */ 4735 if (pvd->vdev_ops != &vdev_mirror_ops && 4736 pvd->vdev_ops != &vdev_root_ops) 4737 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4738 4739 pvops = &vdev_mirror_ops; 4740 } else { 4741 /* 4742 * Active hot spares can only be replaced by inactive hot 4743 * spares. 4744 */ 4745 if (pvd->vdev_ops == &vdev_spare_ops && 4746 oldvd->vdev_isspare && 4747 !spa_has_spare(spa, newvd->vdev_guid)) 4748 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4749 4750 /* 4751 * If the source is a hot spare, and the parent isn't already a 4752 * spare, then we want to create a new hot spare. Otherwise, we 4753 * want to create a replacing vdev. The user is not allowed to 4754 * attach to a spared vdev child unless the 'isspare' state is 4755 * the same (spare replaces spare, non-spare replaces 4756 * non-spare). 4757 */ 4758 if (pvd->vdev_ops == &vdev_replacing_ops && 4759 spa_version(spa) < SPA_VERSION_MULTI_REPLACE) { 4760 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4761 } else if (pvd->vdev_ops == &vdev_spare_ops && 4762 newvd->vdev_isspare != oldvd->vdev_isspare) { 4763 return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP)); 4764 } 4765 4766 if (newvd->vdev_isspare) 4767 pvops = &vdev_spare_ops; 4768 else 4769 pvops = &vdev_replacing_ops; 4770 } 4771 4772 /* 4773 * Make sure the new device is big enough. 
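 * "Big enough" is measured against vdev_get_min_asize() rather than
 * the old device's raw asize, so a replacement that is marginally
 * smaller may still be accepted if all allocated space still fits.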
4774 */ 4775 if (newvd->vdev_asize < vdev_get_min_asize(oldvd)) 4776 return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW)); 4777 4778 /* 4779 * The new device cannot have a higher alignment requirement 4780 * than the top-level vdev. 4781 */ 4782 if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift) 4783 return (spa_vdev_exit(spa, newrootvd, txg, EDOM)); 4784 4785 /* 4786 * If this is an in-place replacement, update oldvd's path and devid 4787 * to make it distinguishable from newvd, and unopenable from now on. 4788 */ 4789 if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) { 4790 spa_strfree(oldvd->vdev_path); 4791 oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5, 4792 KM_SLEEP); 4793 (void) sprintf(oldvd->vdev_path, "%s/%s", 4794 newvd->vdev_path, "old"); 4795 if (oldvd->vdev_devid != NULL) { 4796 spa_strfree(oldvd->vdev_devid); 4797 oldvd->vdev_devid = NULL; 4798 } 4799 } 4800 4801 /* mark the device being resilvered */ 4802 newvd->vdev_resilver_txg = txg; 4803 4804 /* 4805 * If the parent is not a mirror, or if we're replacing, insert the new 4806 * mirror/replacing/spare vdev above oldvd. 4807 */ 4808 if (pvd->vdev_ops != pvops) 4809 pvd = vdev_add_parent(oldvd, pvops); 4810 4811 ASSERT(pvd->vdev_top->vdev_parent == rvd); 4812 ASSERT(pvd->vdev_ops == pvops); 4813 ASSERT(oldvd->vdev_parent == pvd); 4814 4815 /* 4816 * Extract the new device from its root and add it to pvd. 4817 */ 4818 vdev_remove_child(newrootvd, newvd); 4819 newvd->vdev_id = pvd->vdev_children; 4820 newvd->vdev_crtxg = oldvd->vdev_crtxg; 4821 vdev_add_child(pvd, newvd); 4822 4823 tvd = newvd->vdev_top; 4824 ASSERT(pvd->vdev_top == tvd); 4825 ASSERT(tvd->vdev_parent == rvd); 4826 4827 vdev_config_dirty(tvd); 4828 4829 /* 4830 * Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account 4831 * for any dmu_sync-ed blocks. It will propagate upward when 4832 * spa_vdev_exit() calls vdev_dtl_reassess(). 4833 */ 4834 dtl_max_txg = txg + TXG_CONCURRENT_STATES; 4835 4836 vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL, 4837 dtl_max_txg - TXG_INITIAL); 4838 4839 if (newvd->vdev_isspare) { 4840 spa_spare_activate(newvd); 4841 spa_event_notify(spa, newvd, ESC_ZFS_VDEV_SPARE); 4842 } 4843 4844 oldvdpath = spa_strdup(oldvd->vdev_path); 4845 newvdpath = spa_strdup(newvd->vdev_path); 4846 newvd_isspare = newvd->vdev_isspare; 4847 4848 /* 4849 * Mark newvd's DTL dirty in this txg. 4850 */ 4851 vdev_dirty(tvd, VDD_DTL, newvd, txg); 4852 4853 /* 4854 * Schedule the resilver to restart in the future. We do this to 4855 * ensure that dmu_sync-ed blocks have been stitched into the 4856 * respective datasets. 4857 */ 4858 dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg); 4859 4860 /* 4861 * Commit the config 4862 */ 4863 (void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0); 4864 4865 spa_history_log_internal(spa, "vdev attach", NULL, 4866 "%s vdev=%s %s vdev=%s", 4867 replacing && newvd_isspare ? "spare in" : 4868 replacing ? "replace" : "attach", newvdpath, 4869 replacing ? "for" : "to", oldvdpath); 4870 4871 spa_strfree(oldvdpath); 4872 spa_strfree(newvdpath); 4873 4874 if (spa->spa_bootfs) 4875 spa_event_notify(spa, newvd, ESC_ZFS_BOOTFS_VDEV_ATTACH); 4876 4877 return (0); 4878} 4879 4880/* 4881 * Detach a device from a mirror or replacing vdev. 4882 * 4883 * If 'replace_done' is specified, only detach if the parent 4884 * is a replacing vdev. 
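 *
 * When 'pguid' is nonzero it must match the guid of vd's current
 * parent; callers such as spa_vdev_resilver_done() use this to detect
 * that the tree changed underneath them (EBUSY) rather than detach
 * the wrong device.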
4885 */ 4886int 4887spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) 4888{ 4889 uint64_t txg; 4890 int error; 4891 vdev_t *rvd = spa->spa_root_vdev; 4892 vdev_t *vd, *pvd, *cvd, *tvd; 4893 boolean_t unspare = B_FALSE; 4894 uint64_t unspare_guid = 0; 4895 char *vdpath; 4896 4897 ASSERT(spa_writeable(spa)); 4898 4899 txg = spa_vdev_enter(spa); 4900 4901 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 4902 4903 if (vd == NULL) 4904 return (spa_vdev_exit(spa, NULL, txg, ENODEV)); 4905 4906 if (!vd->vdev_ops->vdev_op_leaf) 4907 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4908 4909 pvd = vd->vdev_parent; 4910 4911 /* 4912 * If the parent/child relationship is not as expected, don't do it. 4913 * Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing 4914 * vdev that's replacing B with C. The user's intent in replacing 4915 * is to go from M(A,B) to M(A,C). If the user decides to cancel 4916 * the replace by detaching C, the expected behavior is to end up 4917 * M(A,B). But suppose that right after deciding to detach C, 4918 * the replacement of B completes. We would have M(A,C), and then 4919 * ask to detach C, which would leave us with just A -- not what 4920 * the user wanted. To prevent this, we make sure that the 4921 * parent/child relationship hasn't changed -- in this example, 4922 * that C's parent is still the replacing vdev R. 4923 */ 4924 if (pvd->vdev_guid != pguid && pguid != 0) 4925 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4926 4927 /* 4928 * Only 'replacing' or 'spare' vdevs can be replaced. 4929 */ 4930 if (replace_done && pvd->vdev_ops != &vdev_replacing_ops && 4931 pvd->vdev_ops != &vdev_spare_ops) 4932 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4933 4934 ASSERT(pvd->vdev_ops != &vdev_spare_ops || 4935 spa_version(spa) >= SPA_VERSION_SPARES); 4936 4937 /* 4938 * Only mirror, replacing, and spare vdevs support detach. 4939 */ 4940 if (pvd->vdev_ops != &vdev_replacing_ops && 4941 pvd->vdev_ops != &vdev_mirror_ops && 4942 pvd->vdev_ops != &vdev_spare_ops) 4943 return (spa_vdev_exit(spa, NULL, txg, ENOTSUP)); 4944 4945 /* 4946 * If this device has the only valid copy of some data, 4947 * we cannot safely detach it. 4948 */ 4949 if (vdev_dtl_required(vd)) 4950 return (spa_vdev_exit(spa, NULL, txg, EBUSY)); 4951 4952 ASSERT(pvd->vdev_children >= 2); 4953 4954 /* 4955 * If we are detaching the second disk from a replacing vdev, then 4956 * check to see if we changed the original vdev's path to have "/old" 4957 * at the end in spa_vdev_attach(). If so, undo that change now. 4958 */ 4959 if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 && 4960 vd->vdev_path != NULL) { 4961 size_t len = strlen(vd->vdev_path); 4962 4963 for (int c = 0; c < pvd->vdev_children; c++) { 4964 cvd = pvd->vdev_child[c]; 4965 4966 if (cvd == vd || cvd->vdev_path == NULL) 4967 continue; 4968 4969 if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 && 4970 strcmp(cvd->vdev_path + len, "/old") == 0) { 4971 spa_strfree(cvd->vdev_path); 4972 cvd->vdev_path = spa_strdup(vd->vdev_path); 4973 break; 4974 } 4975 } 4976 } 4977 4978 /* 4979 * If we are detaching the original disk from a spare, then it implies 4980 * that the spare should become a real disk, and be removed from the 4981 * active spare list for the pool. 4982 */ 4983 if (pvd->vdev_ops == &vdev_spare_ops && 4984 vd->vdev_id == 0 && 4985 pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare) 4986 unspare = B_TRUE; 4987 4988 /* 4989 * Erase the disk labels so the disk can be used for other things. 
4990 * This must be done after all other error cases are handled, 4991 * but before we disembowel vd (so we can still do I/O to it). 4992 * But if we can't do it, don't treat the error as fatal -- 4993 * it may be that the unwritability of the disk is the reason 4994 * it's being detached! 4995 */ 4996 error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 4997 4998 /* 4999 * Remove vd from its parent and compact the parent's children. 5000 */ 5001 vdev_remove_child(pvd, vd); 5002 vdev_compact_children(pvd); 5003 5004 /* 5005 * Remember one of the remaining children so we can get tvd below. 5006 */ 5007 cvd = pvd->vdev_child[pvd->vdev_children - 1]; 5008 5009 /* 5010 * If we need to remove the remaining child from the list of hot spares, 5011 * do it now, marking the vdev as no longer a spare in the process. 5012 * We must do this before vdev_remove_parent(), because that can 5013 * change the GUID if it creates a new toplevel GUID. For a similar 5014 * reason, we must remove the spare now, in the same txg as the detach; 5015 * otherwise someone could attach a new sibling, change the GUID, and 5016 * the subsequent attempt to spa_vdev_remove(unspare_guid) would fail. 5017 */ 5018 if (unspare) { 5019 ASSERT(cvd->vdev_isspare); 5020 spa_spare_remove(cvd); 5021 unspare_guid = cvd->vdev_guid; 5022 (void) spa_vdev_remove(spa, unspare_guid, B_TRUE); 5023 cvd->vdev_unspare = B_TRUE; 5024 } 5025 5026 /* 5027 * If the parent mirror/replacing vdev only has one child, 5028 * the parent is no longer needed. Remove it from the tree. 5029 */ 5030 if (pvd->vdev_children == 1) { 5031 if (pvd->vdev_ops == &vdev_spare_ops) 5032 cvd->vdev_unspare = B_FALSE; 5033 vdev_remove_parent(cvd); 5034 } 5035 5036 5037 /* 5038 * We don't set tvd until now because the parent we just removed 5039 * may have been the previous top-level vdev. 5040 */ 5041 tvd = cvd->vdev_top; 5042 ASSERT(tvd->vdev_parent == rvd); 5043 5044 /* 5045 * Reevaluate the parent vdev state. 5046 */ 5047 vdev_propagate_state(cvd); 5048 5049 /* 5050 * If the 'autoexpand' property is set on the pool then automatically 5051 * try to expand the size of the pool. For example if the device we 5052 * just detached was smaller than the others, it may be possible to 5053 * add metaslabs (i.e. grow the pool). We need to reopen the vdev 5054 * first so that we can obtain the updated sizes of the leaf vdevs. 5055 */ 5056 if (spa->spa_autoexpand) { 5057 vdev_reopen(tvd); 5058 vdev_expand(tvd, txg); 5059 } 5060 5061 vdev_config_dirty(tvd); 5062 5063 /* 5064 * Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that 5065 * vd->vdev_detached is set and free vd's DTL object in syncing context. 5066 * But first make sure we're not on any *other* txg's DTL list, to 5067 * prevent vd from being accessed after it's freed. 5068 */ 5069 vdpath = spa_strdup(vd->vdev_path); 5070 for (int t = 0; t < TXG_SIZE; t++) 5071 (void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t); 5072 vd->vdev_detached = B_TRUE; 5073 vdev_dirty(tvd, VDD_DTL, vd, txg); 5074 5075 spa_event_notify(spa, vd, ESC_ZFS_VDEV_REMOVE); 5076 5077 /* hang on to the spa before we release the lock */ 5078 spa_open_ref(spa, FTAG); 5079 5080 error = spa_vdev_exit(spa, vd, txg, 0); 5081 5082 spa_history_log_internal(spa, "detach", NULL, 5083 "vdev=%s", vdpath); 5084 spa_strfree(vdpath); 5085 5086 /* 5087 * If this was the removal of the original device in a hot spare vdev, 5088 * then we want to go through and remove the device from the hot spare 5089 * list of every other pool. 
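 *
 * The walk below uses spa_next(); we take a reference on each pool
 * and drop the namespace lock around spa_vdev_remove(), since that
 * path reacquires the namespace lock via spa_vdev_enter().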
5090 */ 5091 if (unspare) { 5092 spa_t *altspa = NULL; 5093 5094 mutex_enter(&spa_namespace_lock); 5095 while ((altspa = spa_next(altspa)) != NULL) { 5096 if (altspa->spa_state != POOL_STATE_ACTIVE || 5097 altspa == spa) 5098 continue; 5099 5100 spa_open_ref(altspa, FTAG); 5101 mutex_exit(&spa_namespace_lock); 5102 (void) spa_vdev_remove(altspa, unspare_guid, B_TRUE); 5103 mutex_enter(&spa_namespace_lock); 5104 spa_close(altspa, FTAG); 5105 } 5106 mutex_exit(&spa_namespace_lock); 5107 5108 /* search the rest of the vdevs for spares to remove */ 5109 spa_vdev_resilver_done(spa); 5110 } 5111 5112 /* all done with the spa; OK to release */ 5113 mutex_enter(&spa_namespace_lock); 5114 spa_close(spa, FTAG); 5115 mutex_exit(&spa_namespace_lock); 5116 5117 return (error); 5118} 5119 5120/* 5121 * Split a set of devices from their mirrors, and create a new pool from them. 5122 */ 5123int 5124spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config, 5125 nvlist_t *props, boolean_t exp) 5126{ 5127 int error = 0; 5128 uint64_t txg, *glist; 5129 spa_t *newspa; 5130 uint_t c, children, lastlog; 5131 nvlist_t **child, *nvl, *tmp; 5132 dmu_tx_t *tx; 5133 char *altroot = NULL; 5134 vdev_t *rvd, **vml = NULL; /* vdev modify list */ 5135 boolean_t activate_slog; 5136 5137 ASSERT(spa_writeable(spa)); 5138 5139 txg = spa_vdev_enter(spa); 5140 5141 /* clear the log and flush everything up to now */ 5142 activate_slog = spa_passivate_log(spa); 5143 (void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5144 error = spa_offline_log(spa); 5145 txg = spa_vdev_config_enter(spa); 5146 5147 if (activate_slog) 5148 spa_activate_log(spa); 5149 5150 if (error != 0) 5151 return (spa_vdev_exit(spa, NULL, txg, error)); 5152 5153 /* check new spa name before going any further */ 5154 if (spa_lookup(newname) != NULL) 5155 return (spa_vdev_exit(spa, NULL, txg, EEXIST)); 5156 5157 /* 5158 * scan through all the children to ensure they're all mirrors 5159 */ 5160 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 || 5161 nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child, 5162 &children) != 0) 5163 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5164 5165 /* first, check to ensure we've got the right child count */ 5166 rvd = spa->spa_root_vdev; 5167 lastlog = 0; 5168 for (c = 0; c < rvd->vdev_children; c++) { 5169 vdev_t *vd = rvd->vdev_child[c]; 5170 5171 /* don't count the holes & logs as children */ 5172 if (vd->vdev_islog || vd->vdev_ishole) { 5173 if (lastlog == 0) 5174 lastlog = c; 5175 continue; 5176 } 5177 5178 lastlog = 0; 5179 } 5180 if (children != (lastlog != 0 ? 
lastlog : rvd->vdev_children)) 5181 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5182 5183 /* next, ensure no spare or cache devices are part of the split */ 5184 if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 || 5185 nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0) 5186 return (spa_vdev_exit(spa, NULL, txg, EINVAL)); 5187 5188 vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP); 5189 glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP); 5190 5191 /* then, loop over each vdev and validate it */ 5192 for (c = 0; c < children; c++) { 5193 uint64_t is_hole = 0; 5194 5195 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE, 5196 &is_hole); 5197 5198 if (is_hole != 0) { 5199 if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole || 5200 spa->spa_root_vdev->vdev_child[c]->vdev_islog) { 5201 continue; 5202 } else { 5203 error = SET_ERROR(EINVAL); 5204 break; 5205 } 5206 } 5207 5208 /* which disk is going to be split? */ 5209 if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID, 5210 &glist[c]) != 0) { 5211 error = SET_ERROR(EINVAL); 5212 break; 5213 } 5214 5215 /* look it up in the spa */ 5216 vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE); 5217 if (vml[c] == NULL) { 5218 error = SET_ERROR(ENODEV); 5219 break; 5220 } 5221 5222 /* make sure there's nothing stopping the split */ 5223 if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops || 5224 vml[c]->vdev_islog || 5225 vml[c]->vdev_ishole || 5226 vml[c]->vdev_isspare || 5227 vml[c]->vdev_isl2cache || 5228 !vdev_writeable(vml[c]) || 5229 vml[c]->vdev_children != 0 || 5230 vml[c]->vdev_state != VDEV_STATE_HEALTHY || 5231 c != spa->spa_root_vdev->vdev_child[c]->vdev_id) { 5232 error = SET_ERROR(EINVAL); 5233 break; 5234 } 5235 5236 if (vdev_dtl_required(vml[c])) { 5237 error = SET_ERROR(EBUSY); 5238 break; 5239 } 5240 5241 /* we need certain info from the top level */ 5242 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY, 5243 vml[c]->vdev_top->vdev_ms_array) == 0); 5244 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT, 5245 vml[c]->vdev_top->vdev_ms_shift) == 0); 5246 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE, 5247 vml[c]->vdev_top->vdev_asize) == 0); 5248 VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT, 5249 vml[c]->vdev_top->vdev_ashift) == 0); 5250 } 5251 5252 if (error != 0) { 5253 kmem_free(vml, children * sizeof (vdev_t *)); 5254 kmem_free(glist, children * sizeof (uint64_t)); 5255 return (spa_vdev_exit(spa, NULL, txg, error)); 5256 } 5257 5258 /* stop writers from using the disks */ 5259 for (c = 0; c < children; c++) { 5260 if (vml[c] != NULL) 5261 vml[c]->vdev_offline = B_TRUE; 5262 } 5263 vdev_reopen(spa->spa_root_vdev); 5264 5265 /* 5266 * Temporarily record the splitting vdevs in the spa config. This 5267 * will disappear once the config is regenerated. 5268 */ 5269 VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5270 VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST, 5271 glist, children) == 0); 5272 kmem_free(glist, children * sizeof (uint64_t)); 5273 5274 mutex_enter(&spa->spa_props_lock); 5275 VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT, 5276 nvl) == 0); 5277 mutex_exit(&spa->spa_props_lock); 5278 spa->spa_config_splitting = nvl; 5279 vdev_config_dirty(spa->spa_root_vdev); 5280 5281 /* configure and create the new pool */ 5282 VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0); 5283 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, 5284 exp ? 
POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0); 5285 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, 5286 spa_version(spa)) == 0); 5287 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, 5288 spa->spa_config_txg) == 0); 5289 VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, 5290 spa_generate_guid(NULL)) == 0); 5291 (void) nvlist_lookup_string(props, 5292 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot); 5293 5294 /* add the new pool to the namespace */ 5295 newspa = spa_add(newname, config, altroot); 5296 newspa->spa_config_txg = spa->spa_config_txg; 5297 spa_set_log_state(newspa, SPA_LOG_CLEAR); 5298 5299 /* release the spa config lock, retaining the namespace lock */ 5300 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5301 5302 if (zio_injection_enabled) 5303 zio_handle_panic_injection(spa, FTAG, 1); 5304 5305 spa_activate(newspa, spa_mode_global); 5306 spa_async_suspend(newspa); 5307 5308#ifndef sun 5309 /* mark that we are creating new spa by splitting */ 5310 newspa->spa_splitting_newspa = B_TRUE; 5311#endif 5312 /* create the new pool from the disks of the original pool */ 5313 error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE); 5314#ifndef sun 5315 newspa->spa_splitting_newspa = B_FALSE; 5316#endif 5317 if (error) 5318 goto out; 5319 5320 /* if that worked, generate a real config for the new pool */ 5321 if (newspa->spa_root_vdev != NULL) { 5322 VERIFY(nvlist_alloc(&newspa->spa_config_splitting, 5323 NV_UNIQUE_NAME, KM_SLEEP) == 0); 5324 VERIFY(nvlist_add_uint64(newspa->spa_config_splitting, 5325 ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0); 5326 spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL, 5327 B_TRUE)); 5328 } 5329 5330 /* set the props */ 5331 if (props != NULL) { 5332 spa_configfile_set(newspa, props, B_FALSE); 5333 error = spa_prop_set(newspa, props); 5334 if (error) 5335 goto out; 5336 } 5337 5338 /* flush everything */ 5339 txg = spa_vdev_config_enter(newspa); 5340 vdev_config_dirty(newspa->spa_root_vdev); 5341 (void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG); 5342 5343 if (zio_injection_enabled) 5344 zio_handle_panic_injection(spa, FTAG, 2); 5345 5346 spa_async_resume(newspa); 5347 5348 /* finally, update the original pool's config */ 5349 txg = spa_vdev_config_enter(spa); 5350 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 5351 error = dmu_tx_assign(tx, TXG_WAIT); 5352 if (error != 0) 5353 dmu_tx_abort(tx); 5354 for (c = 0; c < children; c++) { 5355 if (vml[c] != NULL) { 5356 vdev_split(vml[c]); 5357 if (error == 0) 5358 spa_history_log_internal(spa, "detach", tx, 5359 "vdev=%s", vml[c]->vdev_path); 5360 vdev_free(vml[c]); 5361 } 5362 } 5363 vdev_config_dirty(spa->spa_root_vdev); 5364 spa->spa_config_splitting = NULL; 5365 nvlist_free(nvl); 5366 if (error == 0) 5367 dmu_tx_commit(tx); 5368 (void) spa_vdev_exit(spa, NULL, txg, 0); 5369 5370 if (zio_injection_enabled) 5371 zio_handle_panic_injection(spa, FTAG, 3); 5372 5373 /* split is complete; log a history record */ 5374 spa_history_log_internal(newspa, "split", NULL, 5375 "from pool %s", spa_name(spa)); 5376 5377 kmem_free(vml, children * sizeof (vdev_t *)); 5378 5379 /* if we're not going to mount the filesystems in userland, export */ 5380 if (exp) 5381 error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL, 5382 B_FALSE, B_FALSE); 5383 5384 return (error); 5385 5386out: 5387 spa_unload(newspa); 5388 spa_deactivate(newspa); 5389 spa_remove(newspa); 5390 5391 txg = spa_vdev_config_enter(spa); 5392 5393 /* re-online all offlined disks */ 5394 
for (c = 0; c < children; c++) { 5395 if (vml[c] != NULL) 5396 vml[c]->vdev_offline = B_FALSE; 5397 } 5398 vdev_reopen(spa->spa_root_vdev); 5399 5400 nvlist_free(spa->spa_config_splitting); 5401 spa->spa_config_splitting = NULL; 5402 (void) spa_vdev_exit(spa, NULL, txg, error); 5403 5404 kmem_free(vml, children * sizeof (vdev_t *)); 5405 return (error); 5406} 5407 5408static nvlist_t * 5409spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid) 5410{ 5411 for (int i = 0; i < count; i++) { 5412 uint64_t guid; 5413 5414 VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID, 5415 &guid) == 0); 5416 5417 if (guid == target_guid) 5418 return (nvpp[i]); 5419 } 5420 5421 return (NULL); 5422} 5423 5424static void 5425spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count, 5426 nvlist_t *dev_to_remove) 5427{ 5428 nvlist_t **newdev = NULL; 5429 5430 if (count > 1) 5431 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP); 5432 5433 for (int i = 0, j = 0; i < count; i++) { 5434 if (dev[i] == dev_to_remove) 5435 continue; 5436 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0); 5437 } 5438 5439 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0); 5440 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0); 5441 5442 for (int i = 0; i < count - 1; i++) 5443 nvlist_free(newdev[i]); 5444 5445 if (count > 1) 5446 kmem_free(newdev, (count - 1) * sizeof (void *)); 5447} 5448 5449/* 5450 * Evacuate the device. 5451 */ 5452static int 5453spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd) 5454{ 5455 uint64_t txg; 5456 int error = 0; 5457 5458 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5459 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5460 ASSERT(vd == vd->vdev_top); 5461 5462 /* 5463 * Evacuate the device. We don't hold the config lock as writer 5464 * since we need to do I/O but we do keep the 5465 * spa_namespace_lock held. Once this completes the device 5466 * should no longer have any blocks allocated on it. 5467 */ 5468 if (vd->vdev_islog) { 5469 if (vd->vdev_stat.vs_alloc != 0) 5470 error = spa_offline_log(spa); 5471 } else { 5472 error = SET_ERROR(ENOTSUP); 5473 } 5474 5475 if (error) 5476 return (error); 5477 5478 /* 5479 * The evacuation succeeded. Remove any remaining MOS metadata 5480 * associated with this vdev, and wait for these changes to sync. 5481 */ 5482 ASSERT0(vd->vdev_stat.vs_alloc); 5483 txg = spa_vdev_config_enter(spa); 5484 vd->vdev_removing = B_TRUE; 5485 vdev_dirty_leaves(vd, VDD_DTL, txg); 5486 vdev_config_dirty(vd); 5487 spa_vdev_config_exit(spa, NULL, txg, 0, FTAG); 5488 5489 return (0); 5490} 5491 5492/* 5493 * Complete the removal by cleaning up the namespace. 5494 */ 5495static void 5496spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd) 5497{ 5498 vdev_t *rvd = spa->spa_root_vdev; 5499 uint64_t id = vd->vdev_id; 5500 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 5501 5502 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 5503 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 5504 ASSERT(vd == vd->vdev_top); 5505 5506 /* 5507 * Only remove any devices which are empty. 
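 *
 * If the vdev being removed is not the last top-level child, a hole
 * vdev is grafted into its slot below so that the ids of the
 * remaining top-level vdevs stay stable.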
5508 */ 5509 if (vd->vdev_stat.vs_alloc != 0) 5510 return; 5511 5512 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 5513 5514 if (list_link_active(&vd->vdev_state_dirty_node)) 5515 vdev_state_clean(vd); 5516 if (list_link_active(&vd->vdev_config_dirty_node)) 5517 vdev_config_clean(vd); 5518 5519 vdev_free(vd); 5520 5521 if (last_vdev) { 5522 vdev_compact_children(rvd); 5523 } else { 5524 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 5525 vdev_add_child(rvd, vd); 5526 } 5527 vdev_config_dirty(rvd); 5528 5529 /* 5530 * Reassess the health of our root vdev. 5531 */ 5532 vdev_reopen(rvd); 5533} 5534 5535/* 5536 * Remove a device from the pool - 5537 * 5538 * Removing a device from the vdev namespace requires several steps 5539 * and can take a significant amount of time. As a result we use 5540 * the spa_vdev_config_[enter/exit] functions which allow us to 5541 * grab and release the spa_config_lock while still holding the namespace 5542 * lock. During each step the configuration is synced out. 5543 * 5544 * Currently, this supports removing only hot spares, slogs, and level 2 ARC 5545 * devices. 5546 */ 5547int 5548spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) 5549{ 5550 vdev_t *vd; 5551 metaslab_group_t *mg; 5552 nvlist_t **spares, **l2cache, *nv; 5553 uint64_t txg = 0; 5554 uint_t nspares, nl2cache; 5555 int error = 0; 5556 boolean_t locked = MUTEX_HELD(&spa_namespace_lock); 5557 5558 ASSERT(spa_writeable(spa)); 5559 5560 if (!locked) 5561 txg = spa_vdev_enter(spa); 5562 5563 vd = spa_lookup_by_guid(spa, guid, B_FALSE); 5564 5565 if (spa->spa_spares.sav_vdevs != NULL && 5566 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config, 5567 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 && 5568 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) { 5569 /* 5570 * Only remove the hot spare if it's not currently in use 5571 * in this pool. 5572 */ 5573 if (vd == NULL || unspare) { 5574 spa_vdev_remove_aux(spa->spa_spares.sav_config, 5575 ZPOOL_CONFIG_SPARES, spares, nspares, nv); 5576 spa_load_spares(spa); 5577 spa->spa_spares.sav_sync = B_TRUE; 5578 } else { 5579 error = SET_ERROR(EBUSY); 5580 } 5581 } else if (spa->spa_l2cache.sav_vdevs != NULL && 5582 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config, 5583 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 && 5584 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) { 5585 /* 5586 * Cache devices can always be removed. 5587 */ 5588 spa_vdev_remove_aux(spa->spa_l2cache.sav_config, 5589 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv); 5590 spa_load_l2cache(spa); 5591 spa->spa_l2cache.sav_sync = B_TRUE; 5592 } else if (vd != NULL && vd->vdev_islog) { 5593 ASSERT(!locked); 5594 ASSERT(vd == vd->vdev_top); 5595 5596 mg = vd->vdev_mg; 5597 5598 /* 5599 * Stop allocating from this vdev. 5600 */ 5601 metaslab_group_passivate(mg); 5602 5603 /* 5604 * Wait for the youngest allocations and frees to sync, 5605 * and then wait for the deferral of those frees to finish. 5606 */ 5607 spa_vdev_config_exit(spa, NULL, 5608 txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 5609 5610 /* 5611 * Attempt to evacuate the vdev. 5612 */ 5613 error = spa_vdev_remove_evacuate(spa, vd); 5614 5615 txg = spa_vdev_config_enter(spa); 5616 5617 /* 5618 * If we couldn't evacuate the vdev, unwind. 5619 */ 5620 if (error) { 5621 metaslab_group_activate(mg); 5622 return (spa_vdev_exit(spa, NULL, txg, error)); 5623 } 5624 5625 /* 5626 * Clean up the vdev namespace. 
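 *
 * At this point the log device is empty, so the overall removal
 * sequence has been: passivate the metaslab group, wait out the open
 * txgs, evacuate, and finally drop the vdev from the tree here.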
5627 */ 5628 spa_vdev_remove_from_namespace(spa, vd); 5629 5630 } else if (vd != NULL) { 5631 /* 5632 * Normal vdevs cannot be removed (yet). 5633 */ 5634 error = SET_ERROR(ENOTSUP); 5635 } else { 5636 /* 5637 * There is no vdev of any kind with the specified guid. 5638 */ 5639 error = SET_ERROR(ENOENT); 5640 } 5641 5642 if (!locked) 5643 return (spa_vdev_exit(spa, NULL, txg, error)); 5644 5645 return (error); 5646} 5647 5648/* 5649 * Find any device that's done replacing, or a vdev marked 'unspare' that's 5650 * currently spared, so we can detach it. 5651 */ 5652static vdev_t * 5653spa_vdev_resilver_done_hunt(vdev_t *vd) 5654{ 5655 vdev_t *newvd, *oldvd; 5656 5657 for (int c = 0; c < vd->vdev_children; c++) { 5658 oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]); 5659 if (oldvd != NULL) 5660 return (oldvd); 5661 } 5662 5663 /* 5664 * Check for a completed replacement. We always consider the first 5665 * vdev in the list to be the oldest vdev, and the last one to be 5666 * the newest (see spa_vdev_attach() for how that works). In 5667 * the case where the newest vdev is faulted, we will not automatically 5668 * remove it after a resilver completes. This is OK as it will require 5669 * user intervention to determine which disk the admin wishes to keep. 5670 */ 5671 if (vd->vdev_ops == &vdev_replacing_ops) { 5672 ASSERT(vd->vdev_children > 1); 5673 5674 newvd = vd->vdev_child[vd->vdev_children - 1]; 5675 oldvd = vd->vdev_child[0]; 5676 5677 if (vdev_dtl_empty(newvd, DTL_MISSING) && 5678 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5679 !vdev_dtl_required(oldvd)) 5680 return (oldvd); 5681 } 5682 5683 /* 5684 * Check for a completed resilver with the 'unspare' flag set. 5685 */ 5686 if (vd->vdev_ops == &vdev_spare_ops) { 5687 vdev_t *first = vd->vdev_child[0]; 5688 vdev_t *last = vd->vdev_child[vd->vdev_children - 1]; 5689 5690 if (last->vdev_unspare) { 5691 oldvd = first; 5692 newvd = last; 5693 } else if (first->vdev_unspare) { 5694 oldvd = last; 5695 newvd = first; 5696 } else { 5697 oldvd = NULL; 5698 } 5699 5700 if (oldvd != NULL && 5701 vdev_dtl_empty(newvd, DTL_MISSING) && 5702 vdev_dtl_empty(newvd, DTL_OUTAGE) && 5703 !vdev_dtl_required(oldvd)) 5704 return (oldvd); 5705 5706 /* 5707 * If there are more than two spares attached to a disk, 5708 * and those spares are not required, then we want to 5709 * attempt to free them up now so that they can be used 5710 * by other pools. Once we're back down to a single 5711 * disk+spare, we stop removing them. 5712 */ 5713 if (vd->vdev_children > 2) { 5714 newvd = vd->vdev_child[1]; 5715 5716 if (newvd->vdev_isspare && last->vdev_isspare && 5717 vdev_dtl_empty(last, DTL_MISSING) && 5718 vdev_dtl_empty(last, DTL_OUTAGE) && 5719 !vdev_dtl_required(newvd)) 5720 return (newvd); 5721 } 5722 } 5723 5724 return (NULL); 5725} 5726 5727static void 5728spa_vdev_resilver_done(spa_t *spa) 5729{ 5730 vdev_t *vd, *pvd, *ppvd; 5731 uint64_t guid, sguid, pguid, ppguid; 5732 5733 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5734 5735 while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) { 5736 pvd = vd->vdev_parent; 5737 ppvd = pvd->vdev_parent; 5738 guid = vd->vdev_guid; 5739 pguid = pvd->vdev_guid; 5740 ppguid = ppvd->vdev_guid; 5741 sguid = 0; 5742 /* 5743 * If we have just finished replacing a hot spared device, then 5744 * we need to detach the parent's first child (the original hot 5745 * spare) as well. 
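 *
 * In the notation of spa_vdev_detach(), the tree here is
 * S(R(old,new), spare): vd is 'old', pvd is the replacing vdev R and
 * ppvd is the spare vdev S, whose second child is the physical spare
 * that should be unspared once the replacement finishes.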
5746 */ 5747 if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 && 5748 ppvd->vdev_children == 2) { 5749 ASSERT(pvd->vdev_ops == &vdev_replacing_ops); 5750 sguid = ppvd->vdev_child[1]->vdev_guid; 5751 } 5752 ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd)); 5753 5754 spa_config_exit(spa, SCL_ALL, FTAG); 5755 if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0) 5756 return; 5757 if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0) 5758 return; 5759 spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); 5760 } 5761 5762 spa_config_exit(spa, SCL_ALL, FTAG); 5763} 5764 5765/* 5766 * Update the stored path or FRU for this vdev. 5767 */ 5768int 5769spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value, 5770 boolean_t ispath) 5771{ 5772 vdev_t *vd; 5773 boolean_t sync = B_FALSE; 5774 5775 ASSERT(spa_writeable(spa)); 5776 5777 spa_vdev_state_enter(spa, SCL_ALL); 5778 5779 if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL) 5780 return (spa_vdev_state_exit(spa, NULL, ENOENT)); 5781 5782 if (!vd->vdev_ops->vdev_op_leaf) 5783 return (spa_vdev_state_exit(spa, NULL, ENOTSUP)); 5784 5785 if (ispath) { 5786 if (strcmp(value, vd->vdev_path) != 0) { 5787 spa_strfree(vd->vdev_path); 5788 vd->vdev_path = spa_strdup(value); 5789 sync = B_TRUE; 5790 } 5791 } else { 5792 if (vd->vdev_fru == NULL) { 5793 vd->vdev_fru = spa_strdup(value); 5794 sync = B_TRUE; 5795 } else if (strcmp(value, vd->vdev_fru) != 0) { 5796 spa_strfree(vd->vdev_fru); 5797 vd->vdev_fru = spa_strdup(value); 5798 sync = B_TRUE; 5799 } 5800 } 5801 5802 return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0)); 5803} 5804 5805int 5806spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath) 5807{ 5808 return (spa_vdev_set_common(spa, guid, newpath, B_TRUE)); 5809} 5810 5811int 5812spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru) 5813{ 5814 return (spa_vdev_set_common(spa, guid, newfru, B_FALSE)); 5815} 5816 5817/* 5818 * ========================================================================== 5819 * SPA Scanning 5820 * ========================================================================== 5821 */ 5822 5823int 5824spa_scan_stop(spa_t *spa) 5825{ 5826 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5827 if (dsl_scan_resilvering(spa->spa_dsl_pool)) 5828 return (SET_ERROR(EBUSY)); 5829 return (dsl_scan_cancel(spa->spa_dsl_pool)); 5830} 5831 5832int 5833spa_scan(spa_t *spa, pool_scan_func_t func) 5834{ 5835 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0); 5836 5837 if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE) 5838 return (SET_ERROR(ENOTSUP)); 5839 5840 /* 5841 * If a resilver was requested, but there is no DTL on a 5842 * writeable leaf device, we have nothing to do. 
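 *
 * We still post SPA_ASYNC_RESILVER_DONE below so that any completed
 * replacing or spare vdevs left in the tree are detached even though
 * no new scan is started.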
5843 */ 5844 if (func == POOL_SCAN_RESILVER && 5845 !vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) { 5846 spa_async_request(spa, SPA_ASYNC_RESILVER_DONE); 5847 return (0); 5848 } 5849 5850 return (dsl_scan(spa->spa_dsl_pool, func)); 5851} 5852 5853/* 5854 * ========================================================================== 5855 * SPA async task processing 5856 * ========================================================================== 5857 */ 5858 5859static void 5860spa_async_remove(spa_t *spa, vdev_t *vd) 5861{ 5862 if (vd->vdev_remove_wanted) { 5863 vd->vdev_remove_wanted = B_FALSE; 5864 vd->vdev_delayed_close = B_FALSE; 5865 vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE); 5866 5867 /* 5868 * We want to clear the stats, but we don't want to do a full 5869 * vdev_clear() as that will cause us to throw away 5870 * degraded/faulted state as well as attempt to reopen the 5871 * device, all of which is a waste. 5872 */ 5873 vd->vdev_stat.vs_read_errors = 0; 5874 vd->vdev_stat.vs_write_errors = 0; 5875 vd->vdev_stat.vs_checksum_errors = 0; 5876 5877 vdev_state_dirty(vd->vdev_top); 5878 } 5879 5880 for (int c = 0; c < vd->vdev_children; c++) 5881 spa_async_remove(spa, vd->vdev_child[c]); 5882} 5883 5884static void 5885spa_async_probe(spa_t *spa, vdev_t *vd) 5886{ 5887 if (vd->vdev_probe_wanted) { 5888 vd->vdev_probe_wanted = B_FALSE; 5889 vdev_reopen(vd); /* vdev_open() does the actual probe */ 5890 } 5891 5892 for (int c = 0; c < vd->vdev_children; c++) 5893 spa_async_probe(spa, vd->vdev_child[c]); 5894} 5895 5896static void 5897spa_async_autoexpand(spa_t *spa, vdev_t *vd) 5898{ 5899 sysevent_id_t eid; 5900 nvlist_t *attr; 5901 char *physpath; 5902 5903 if (!spa->spa_autoexpand) 5904 return; 5905 5906 for (int c = 0; c < vd->vdev_children; c++) { 5907 vdev_t *cvd = vd->vdev_child[c]; 5908 spa_async_autoexpand(spa, cvd); 5909 } 5910 5911 if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL) 5912 return; 5913 5914 physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP); 5915 (void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath); 5916 5917 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0); 5918 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0); 5919 5920 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS, 5921 ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP); 5922 5923 nvlist_free(attr); 5924 kmem_free(physpath, MAXPATHLEN); 5925} 5926 5927static void 5928spa_async_thread(void *arg) 5929{ 5930 spa_t *spa = arg; 5931 int tasks; 5932 5933 ASSERT(spa->spa_sync_on); 5934 5935 mutex_enter(&spa->spa_async_lock); 5936 tasks = spa->spa_async_tasks; 5937 spa->spa_async_tasks &= SPA_ASYNC_REMOVE; 5938 mutex_exit(&spa->spa_async_lock); 5939 5940 /* 5941 * See if the config needs to be updated. 5942 */ 5943 if (tasks & SPA_ASYNC_CONFIG_UPDATE) { 5944 uint64_t old_space, new_space; 5945 5946 mutex_enter(&spa_namespace_lock); 5947 old_space = metaslab_class_get_space(spa_normal_class(spa)); 5948 spa_config_update(spa, SPA_CONFIG_UPDATE_POOL); 5949 new_space = metaslab_class_get_space(spa_normal_class(spa)); 5950 mutex_exit(&spa_namespace_lock); 5951 5952 /* 5953 * If the pool grew as a result of the config update, 5954 * then log an internal history event. 
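 *
 * The sizes come from metaslab_class_get_space() and are in bytes;
 * e.g. adding a 10GB top-level vdev to a 10GB pool logs
 * "pool 'tank' size: 21474836480(+10737418240)".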
/*
 * ==========================================================================
 * SPA async task processing
 * ==========================================================================
 */

static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
	if (vd->vdev_remove_wanted) {
		vd->vdev_remove_wanted = B_FALSE;
		vd->vdev_delayed_close = B_FALSE;
		vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);

		/*
		 * We want to clear the stats, but we don't want to do a full
		 * vdev_clear() as that will cause us to throw away
		 * degraded/faulted state as well as attempt to reopen the
		 * device, all of which is a waste.
		 */
		vd->vdev_stat.vs_read_errors = 0;
		vd->vdev_stat.vs_write_errors = 0;
		vd->vdev_stat.vs_checksum_errors = 0;

		vdev_state_dirty(vd->vdev_top);
	}

	for (int c = 0; c < vd->vdev_children; c++)
		spa_async_remove(spa, vd->vdev_child[c]);
}

static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
	if (vd->vdev_probe_wanted) {
		vd->vdev_probe_wanted = B_FALSE;
		vdev_reopen(vd);	/* vdev_open() does the actual probe */
	}

	for (int c = 0; c < vd->vdev_children; c++)
		spa_async_probe(spa, vd->vdev_child[c]);
}

static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
	sysevent_id_t eid;
	nvlist_t *attr;
	char *physpath;

	if (!spa->spa_autoexpand)
		return;

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];
		spa_async_autoexpand(spa, cvd);
	}

	if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
		return;

	physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	(void) snprintf(physpath, MAXPATHLEN, "/devices%s", vd->vdev_physpath);

	VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

	(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
	    ESC_ZFS_VDEV_AUTOEXPAND, attr, &eid, DDI_SLEEP);

	nvlist_free(attr);
	kmem_free(physpath, MAXPATHLEN);
}

static void
spa_async_thread(void *arg)
{
	spa_t *spa = arg;
	int tasks;

	ASSERT(spa->spa_sync_on);

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	spa->spa_async_tasks &= SPA_ASYNC_REMOVE;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if the config needs to be updated.
	 */
	if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
		uint64_t old_space, new_space;

		mutex_enter(&spa_namespace_lock);
		old_space = metaslab_class_get_space(spa_normal_class(spa));
		spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
		new_space = metaslab_class_get_space(spa_normal_class(spa));
		mutex_exit(&spa_namespace_lock);

		/*
		 * If the pool grew as a result of the config update,
		 * then log an internal history event.
		 */
		if (new_space != old_space) {
			spa_history_log_internal(spa, "vdev online", NULL,
			    "pool '%s' size: %llu(+%llu)",
			    spa_name(spa), new_space, new_space - old_space);
		}
	}

	if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
		spa_async_autoexpand(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_CONFIG, FTAG);
	}

	/*
	 * See if any devices need to be probed.
	 */
	if (tasks & SPA_ASYNC_PROBE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_probe(spa, spa->spa_root_vdev);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * If any devices are done replacing, detach them.
	 */
	if (tasks & SPA_ASYNC_RESILVER_DONE)
		spa_vdev_resilver_done(spa);

	/*
	 * Kick off a resilver.
	 */
	if (tasks & SPA_ASYNC_RESILVER)
		dsl_resilver_restart(spa->spa_dsl_pool, 0);

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_thread = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}

static void
spa_async_thread_vd(void *arg)
{
	spa_t *spa = arg;
	int tasks;

	ASSERT(spa->spa_sync_on);

	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
retry:
	spa->spa_async_tasks &= ~SPA_ASYNC_REMOVE;
	mutex_exit(&spa->spa_async_lock);

	/*
	 * See if any devices need to be marked REMOVED.
	 */
	if (tasks & SPA_ASYNC_REMOVE) {
		spa_vdev_state_enter(spa, SCL_NONE);
		spa_async_remove(spa, spa->spa_root_vdev);
		for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
			spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
		for (int i = 0; i < spa->spa_spares.sav_count; i++)
			spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
		(void) spa_vdev_state_exit(spa, NULL, 0);
	}

	/*
	 * Let the world know that we're done.
	 */
	mutex_enter(&spa->spa_async_lock);
	tasks = spa->spa_async_tasks;
	if ((tasks & SPA_ASYNC_REMOVE) != 0)
		goto retry;
	spa->spa_async_thread_vd = NULL;
	cv_broadcast(&spa->spa_async_cv);
	mutex_exit(&spa->spa_async_lock);
	thread_exit();
}
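
/*
 * Illustrative sketch (editor's addition), modeled on the I/O
 * completion path: a caller that notices a device has gone away marks
 * the vdev and lets spa_async_thread_vd() perform the REMOVED state
 * change from a safe context.
 */
#if 0
static void
example_device_gone(spa_t *spa, vdev_t *vd)
{
	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(spa, SPA_ASYNC_REMOVE);
}
#endif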
void
spa_async_suspend(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_suspended++;
	/* Wait until both async threads have exited. */
	while (spa->spa_async_thread != NULL ||
	    spa->spa_async_thread_vd != NULL)
		cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_resume(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	ASSERT(spa->spa_async_suspended != 0);
	spa->spa_async_suspended--;
	mutex_exit(&spa->spa_async_lock);
}

static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
	uint_t non_config_tasks;
	uint_t config_task;
	boolean_t config_task_suspended;

	non_config_tasks = spa->spa_async_tasks & ~(SPA_ASYNC_CONFIG_UPDATE |
	    SPA_ASYNC_REMOVE);
	config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
	if (spa->spa_ccw_fail_time == 0) {
		config_task_suspended = B_FALSE;
	} else {
		config_task_suspended =
		    (gethrtime() - spa->spa_ccw_fail_time) <
		    (zfs_ccw_retry_interval * NANOSEC);
	}

	return (non_config_tasks || (config_task && !config_task_suspended));
}

static void
spa_async_dispatch(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if (spa_async_tasks_pending(spa) &&
	    !spa->spa_async_suspended &&
	    spa->spa_async_thread == NULL &&
	    rootdir != NULL)
		spa->spa_async_thread = thread_create(NULL, 0,
		    spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}

static void
spa_async_dispatch_vd(spa_t *spa)
{
	mutex_enter(&spa->spa_async_lock);
	if ((spa->spa_async_tasks & SPA_ASYNC_REMOVE) != 0 &&
	    !spa->spa_async_suspended &&
	    spa->spa_async_thread_vd == NULL &&
	    rootdir != NULL)
		spa->spa_async_thread_vd = thread_create(NULL, 0,
		    spa_async_thread_vd, spa, 0, &p0, TS_RUN, maxclsyspri);
	mutex_exit(&spa->spa_async_lock);
}

void
spa_async_request(spa_t *spa, int task)
{
	zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
	mutex_enter(&spa->spa_async_lock);
	spa->spa_async_tasks |= task;
	mutex_exit(&spa->spa_async_lock);
	spa_async_dispatch_vd(spa);
}
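
/*
 * Illustrative sketch (editor's addition): operations that cannot
 * tolerate a concurrently running async thread (export, device
 * detach, and the like) bracket themselves with suspend/resume.
 */
#if 0
static void
example_quiesce_async(spa_t *spa)
{
	spa_async_suspend(spa);
	/* ... work that must exclude the async threads ... */
	spa_async_resume(spa);
}
#endif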
/*
 * ==========================================================================
 * SPA syncing routines
 * ==========================================================================
 */

static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	bpobj_t *bpo = arg;
	bpobj_enqueue(bpo, bp, tx);
	return (0);
}

static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	zio_t *zio = arg;

	zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
	    BP_GET_PSIZE(bp), zio->io_flags));
	return (0);
}

/*
 * Note: this simple function is not inlined to make it easier to dtrace the
 * amount of time spent syncing frees.
 */
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(spa, NULL, NULL, 0);
	bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
	VERIFY(zio_wait(zio) == 0);
}

/*
 * Note: this simple function is not inlined to make it easier to dtrace the
 * amount of time spent syncing deferred frees.
 */
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
	zio_t *zio = zio_root(spa, NULL, NULL, 0);
	VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
	    spa_free_sync_cb, zio, tx), ==, 0);
	VERIFY0(zio_wait(zio));
}

static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
	char *packed = NULL;
	size_t bufsize;
	size_t nvsize = 0;
	dmu_buf_t *db;

	VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);

	/*
	 * Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
	 * information.  This avoids the dmu_buf_will_dirty() path and
	 * saves us a pre-read to get data we don't actually care about.
	 */
	bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
	packed = kmem_alloc(bufsize, KM_SLEEP);

	VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);
	bzero(packed + nvsize, bufsize - nvsize);

	dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);

	kmem_free(packed, bufsize);

	VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data = nvsize;
	dmu_buf_rele(db, FTAG);
}
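
/*
 * Illustrative sketch (editor's addition): the effect of the
 * P2ROUNDUP() above, assuming the usual 16K SPA_CONFIG_BLOCKSIZE.
 * A 5000-byte packed nvlist is written as a single zero-padded
 * 16384-byte block, so no read-modify-write is needed.
 */
#if 0
static void
example_config_blocksize(void)
{
	ASSERT3U(P2ROUNDUP((uint64_t)5000, SPA_CONFIG_BLOCKSIZE), ==, 16384);
}
#endif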
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
    const char *config, const char *entry)
{
	nvlist_t *nvroot;
	nvlist_t **list;
	int i;

	if (!sav->sav_sync)
		return;

	/*
	 * Update the MOS nvlist describing the list of available devices.
	 * spa_validate_aux() will have already made sure this nvlist is
	 * valid and the vdevs are labeled appropriately.
	 */
	if (sav->sav_object == 0) {
		sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
		    DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
		    sizeof (uint64_t), tx);
		VERIFY(zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
		    &sav->sav_object, tx) == 0);
	}

	VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	if (sav->sav_count == 0) {
		VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
	} else {
		list = kmem_alloc(sav->sav_count * sizeof (void *), KM_SLEEP);
		for (i = 0; i < sav->sav_count; i++)
			list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
			    B_FALSE, VDEV_CONFIG_L2CACHE);
		VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
		    sav->sav_count) == 0);
		for (i = 0; i < sav->sav_count; i++)
			nvlist_free(list[i]);
		kmem_free(list, sav->sav_count * sizeof (void *));
	}

	spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
	nvlist_free(nvroot);

	sav->sav_sync = B_FALSE;
}

static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
	nvlist_t *config;

	if (list_is_empty(&spa->spa_config_dirty_list))
		return;

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	config = spa_config_generate(spa, spa->spa_root_vdev,
	    dmu_tx_get_txg(tx), B_FALSE);

	/*
	 * If we're upgrading the spa version then make sure that
	 * the config object gets updated with the correct version.
	 */
	if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
		    spa->spa_uberblock.ub_version);

	spa_config_exit(spa, SCL_STATE, FTAG);

	if (spa->spa_config_syncing)
		nvlist_free(spa->spa_config_syncing);
	spa->spa_config_syncing = config;

	spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}

static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
	uint64_t *versionp = arg;
	uint64_t version = *versionp;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	/*
	 * Setting the version is special cased when first creating the pool.
	 */
	ASSERT(tx->tx_txg != TXG_INITIAL);

	ASSERT(SPA_VERSION_IS_SUPPORTED(version));
	ASSERT(version >= spa_version(spa));

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);
	spa_history_log_internal(spa, "set", tx, "version=%lld", version);
}
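
/*
 * Illustrative sketch (editor's addition): spa_sync_version() is not
 * called directly; a version bump arrives through the property path,
 * which wraps it in a sync task.  The wrapper shown here is
 * hypothetical; spa_prop_set() is the real entry point.
 */
#if 0
static int
example_set_version(spa_t *spa, uint64_t newversion)
{
	nvlist_t *nvp;
	int error;

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(nvp,
	    zpool_prop_to_name(ZPOOL_PROP_VERSION), newversion) == 0);
	error = spa_prop_set(spa, nvp);
	nvlist_free(nvp);
	return (error);
}
#endif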
/*
 * Set zpool properties.
 */
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvp = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	objset_t *mos = spa->spa_meta_objset;
	nvpair_t *elem = NULL;

	mutex_enter(&spa->spa_props_lock);

	while ((elem = nvlist_next_nvpair(nvp, elem))) {
		uint64_t intval;
		char *strval, *fname;
		zpool_prop_t prop;
		const char *propname;
		zprop_type_t proptype;
		spa_feature_t fid;

		switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
		case ZPROP_INVAL:
			/*
			 * We checked this earlier in spa_prop_validate().
			 */
			ASSERT(zpool_prop_feature(nvpair_name(elem)));

			fname = strchr(nvpair_name(elem), '@') + 1;
			VERIFY0(zfeature_lookup_name(fname, &fid));

			spa_feature_enable(spa, fid, tx);
			spa_history_log_internal(spa, "set", tx,
			    "%s=enabled", nvpair_name(elem));
			break;

		case ZPOOL_PROP_VERSION:
			intval = fnvpair_value_uint64(elem);
			/*
			 * The version is synced separately before other
			 * properties and should be correct by now.
			 */
			ASSERT3U(spa_version(spa), >=, intval);
			break;

		case ZPOOL_PROP_ALTROOT:
			/*
			 * 'altroot' is a non-persistent property.  It should
			 * have been set temporarily at creation or import time.
			 */
			ASSERT(spa->spa_root != NULL);
			break;

		case ZPOOL_PROP_READONLY:
		case ZPOOL_PROP_CACHEFILE:
			/*
			 * 'readonly' and 'cachefile' are also non-persistent
			 * properties.
			 */
			break;
		case ZPOOL_PROP_COMMENT:
			strval = fnvpair_value_string(elem);
			if (spa->spa_comment != NULL)
				spa_strfree(spa->spa_comment);
			spa->spa_comment = spa_strdup(strval);
			/*
			 * We need to dirty the configuration on all the vdevs
			 * so that their labels get updated.  It's unnecessary
			 * to do this for pool creation since the vdev's
			 * configuration has already been dirtied.
			 */
			if (tx->tx_txg != TXG_INITIAL)
				vdev_config_dirty(spa->spa_root_vdev);
			spa_history_log_internal(spa, "set", tx,
			    "%s=%s", nvpair_name(elem), strval);
			break;
		default:
			/*
			 * Set pool property values in the poolprops MOS object.
			 */
			if (spa->spa_pool_props_object == 0) {
				spa->spa_pool_props_object =
				    zap_create_link(mos, DMU_OT_POOL_PROPS,
				    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
				    tx);
			}

			/* normalize the property name */
			propname = zpool_prop_to_name(prop);
			proptype = zpool_prop_get_type(prop);

			if (nvpair_type(elem) == DATA_TYPE_STRING) {
				ASSERT(proptype == PROP_TYPE_STRING);
				strval = fnvpair_value_string(elem);
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    1, strlen(strval) + 1, strval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%s", nvpair_name(elem), strval);
			} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
				intval = fnvpair_value_uint64(elem);

				if (proptype == PROP_TYPE_INDEX) {
					const char *unused;
					VERIFY0(zpool_prop_index_to_string(
					    prop, intval, &unused));
				}
				VERIFY0(zap_update(mos,
				    spa->spa_pool_props_object, propname,
				    8, 1, &intval, tx));
				spa_history_log_internal(spa, "set", tx,
				    "%s=%lld", nvpair_name(elem), intval);
			} else {
				ASSERT(0); /* not allowed */
			}

			switch (prop) {
			case ZPOOL_PROP_DELEGATION:
				spa->spa_delegation = intval;
				break;
			case ZPOOL_PROP_BOOTFS:
				spa->spa_bootfs = intval;
				break;
			case ZPOOL_PROP_FAILUREMODE:
				spa->spa_failmode = intval;
				break;
			case ZPOOL_PROP_AUTOEXPAND:
				spa->spa_autoexpand = intval;
				if (tx->tx_txg != TXG_INITIAL)
					spa_async_request(spa,
					    SPA_ASYNC_AUTOEXPAND);
				break;
			case ZPOOL_PROP_DEDUPDITTO:
				spa->spa_dedup_ditto = intval;
				break;
			default:
				break;
			}
		}
	}

	mutex_exit(&spa->spa_props_lock);
}
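
/*
 * Illustrative sketch (editor's addition): the shape of the nvlist
 * that reaches the ZPROP_INVAL branch above.  Feature properties are
 * named "feature@<name>"; the uint64 value 0 requests "enabled" (this
 * detail is an assumption worth verifying against spa_prop_validate()).
 */
#if 0
static int
example_enable_feature(spa_t *spa)
{
	nvlist_t *nvp;
	int error;

	VERIFY(nvlist_alloc(&nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	VERIFY(nvlist_add_uint64(nvp, "feature@lz4_compress", 0) == 0);
	error = spa_prop_set(spa, nvp);
	nvlist_free(nvp);
	return (error);
}
#endif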
/*
 * Perform one-time upgrade on-disk changes.  spa_version() does not
 * reflect the new version this txg, so there must be no changes this
 * txg to anything that the upgrade code depends on after it executes.
 * Therefore this must be called after dsl_pool_sync() does the sync
 * tasks.
 */
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;

	ASSERT(spa->spa_sync_pass == 1);

	rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);

	if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
		dsl_pool_create_origin(dp, tx);

		/* Keeping the origin open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
		dsl_pool_upgrade_clones(dp, tx);
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
		dsl_pool_upgrade_dir_clones(dp, tx);

		/* Keeping the freedir open increases spa_minref */
		spa->spa_minref += 3;
	}

	if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		spa_feature_create_zap_objects(spa, tx);
	}

	/*
	 * The LZ4_COMPRESS feature's behavior was changed to
	 * activate_on_enable when the ability to use lz4 compression for
	 * metadata was added.  Old pools that have the feature enabled
	 * must therefore be upgraded so that the feature is active as well.
	 */
	if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
		boolean_t lz4_en = spa_feature_is_enabled(spa,
		    SPA_FEATURE_LZ4_COMPRESS);
		boolean_t lz4_ac = spa_feature_is_active(spa,
		    SPA_FEATURE_LZ4_COMPRESS);

		if (lz4_en && !lz4_ac)
			spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
	}
	rrw_exit(&dp->dp_config_rwlock, FTAG);
}
/*
 * Sync the specified transaction group.  New blocks may be dirtied as
 * part of the process, so we iterate until it converges.
 */
void
spa_sync(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp = spa->spa_dsl_pool;
	objset_t *mos = spa->spa_meta_objset;
	bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
	vdev_t *rvd = spa->spa_root_vdev;
	vdev_t *vd;
	dmu_tx_t *tx;
	int error;

	VERIFY(spa_writeable(spa));

	/*
	 * Lock out configuration changes.
	 */
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	spa->spa_syncing_txg = txg;
	spa->spa_sync_pass = 0;

	/*
	 * If there are any pending vdev state changes, convert them
	 * into config changes that go out with this transaction group.
	 */
	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	while (list_head(&spa->spa_state_dirty_list) != NULL) {
		/*
		 * We need the write lock here because, for aux vdevs,
		 * calling vdev_config_dirty() modifies sav_config.
		 * This is ugly and will become unnecessary when we
		 * eliminate the aux vdev wart by integrating all vdevs
		 * into the root vdev tree.
		 */
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
		while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
			vdev_state_clean(vd);
			vdev_config_dirty(vd);
		}
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	tx = dmu_tx_create_assigned(dp, txg);

	spa->spa_sync_starttime = gethrtime();
#ifdef illumos
	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid,
	    spa->spa_sync_starttime + spa->spa_deadman_synctime));
#else	/* FreeBSD */
#ifdef _KERNEL
	callout_reset(&spa->spa_deadman_cycid,
	    hz * spa->spa_deadman_synctime / NANOSEC, spa_deadman, spa);
#endif
#endif

	/*
	 * If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
	 * set spa_deflate if we have no raid-z vdevs.
	 */
	if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
	    spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
		int i;

		for (i = 0; i < rvd->vdev_children; i++) {
			vd = rvd->vdev_child[i];
			if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
				break;
		}
		if (i == rvd->vdev_children) {
			spa->spa_deflate = TRUE;
			VERIFY(0 == zap_add(spa->spa_meta_objset,
			    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
			    sizeof (uint64_t), 1, &spa->spa_deflate, tx));
		}
	}

	/*
	 * If anything has changed in this txg, or if someone is waiting
	 * for this txg to sync (e.g., spa_vdev_remove()), push the
	 * deferred frees from the previous txg.  If not, leave them
	 * alone so that we don't generate work on an otherwise idle
	 * system.
	 */
	if (!txg_list_empty(&dp->dp_dirty_datasets, txg) ||
	    !txg_list_empty(&dp->dp_dirty_dirs, txg) ||
	    !txg_list_empty(&dp->dp_sync_tasks, txg) ||
	    ((dsl_scan_active(dp->dp_scan) ||
	    txg_sync_waiting(dp)) && !spa_shutting_down(spa))) {
		spa_sync_deferred_frees(spa, tx);
	}

	/*
	 * Iterate to convergence.
	 */
	do {
		int pass = ++spa->spa_sync_pass;

		spa_sync_config_object(spa, tx);
		spa_sync_aux_dev(spa, &spa->spa_spares, tx,
		    ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
		spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
		    ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
		spa_errlog_sync(spa, txg);
		dsl_pool_sync(dp, txg);

		if (pass < zfs_sync_pass_deferred_free) {
			spa_sync_frees(spa, free_bpl, tx);
		} else {
			bplist_iterate(free_bpl, bpobj_enqueue_cb,
			    &spa->spa_deferred_bpobj, tx);
		}

		ddt_sync(spa, txg);
		dsl_scan_sync(dp, tx);

		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
			vdev_sync(vd, txg);

		if (pass == 1)
			spa_sync_upgrades(spa, tx);

	} while (dmu_objset_is_dirty(mos, txg));

	/*
	 * Rewrite the vdev configuration (which includes the uberblock)
	 * to commit the transaction group.
	 *
	 * If there are no dirty vdevs, we sync the uberblock to a few
	 * random top-level vdevs that are known to be visible in the
	 * config cache (see spa_vdev_add() for a complete description).
	 * If there *are* dirty vdevs, sync the uberblock to all vdevs.
	 */
	for (;;) {
		/*
		 * We hold SCL_STATE to prevent vdev open/close/etc.
		 * while we're attempting to write the vdev labels.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

		if (list_is_empty(&spa->spa_config_dirty_list)) {
			vdev_t *svd[SPA_DVAS_PER_BP];
			int svdcount = 0;
			int children = rvd->vdev_children;
			int c0 = spa_get_random(children);

			for (int c = 0; c < children; c++) {
				vd = rvd->vdev_child[(c0 + c) % children];
				if (vd->vdev_ms_array == 0 || vd->vdev_islog)
					continue;
				svd[svdcount++] = vd;
				if (svdcount == SPA_DVAS_PER_BP)
					break;
			}
			error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
			if (error != 0)
				error = vdev_config_sync(svd, svdcount, txg,
				    B_TRUE);
		} else {
			error = vdev_config_sync(rvd->vdev_child,
			    rvd->vdev_children, txg, B_FALSE);
			if (error != 0)
				error = vdev_config_sync(rvd->vdev_child,
				    rvd->vdev_children, txg, B_TRUE);
		}

		if (error == 0)
			spa->spa_last_synced_guid = rvd->vdev_guid;

		spa_config_exit(spa, SCL_STATE, FTAG);

		if (error == 0)
			break;
		zio_suspend(spa, NULL);
		zio_resume_wait(spa);
	}
	dmu_tx_commit(tx);

#ifdef illumos
	VERIFY(cyclic_reprogram(spa->spa_deadman_cycid, CY_INFINITY));
#else	/* FreeBSD */
#ifdef _KERNEL
	callout_drain(&spa->spa_deadman_cycid);
#endif
#endif

	/*
	 * Clear the dirty config list.
	 */
	while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
		vdev_config_clean(vd);

	/*
	 * Now that the new config has synced transactionally,
	 * let it become visible to the config cache.
	 */
	if (spa->spa_config_syncing != NULL) {
		spa_config_set(spa, spa->spa_config_syncing);
		spa->spa_config_txg = txg;
		spa->spa_config_syncing = NULL;
	}

	spa->spa_ubsync = spa->spa_uberblock;

	dsl_pool_sync_done(dp, txg);

	/*
	 * Update usable space statistics.
	 */
	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
		vdev_sync_done(vd, txg);

	spa_update_dspace(spa);

	/*
	 * It had better be the case that we didn't dirty anything
	 * since vdev_config_sync().
	 */
	ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
	ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
	ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));

	spa->spa_sync_pass = 0;

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	spa_handle_ignored_writes(spa);

	/*
	 * If any async tasks have been requested, kick them off.
	 */
	spa_async_dispatch(spa);
	spa_async_dispatch_vd(spa);
}
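
/*
 * Illustrative sketch (editor's addition): spa_sync() itself runs from
 * the txg sync thread.  Code elsewhere forces and waits for a sync of
 * the current txg like so.
 */
#if 0
static void
example_force_sync(spa_t *spa)
{
	txg_wait_synced(spa_get_dsl(spa), 0);	/* 0 == currently open txg */
}
#endif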
/*
 * Sync all pools.  We don't want to hold the namespace lock across these
 * operations, so we take a reference on the spa_t and drop the lock during the
 * sync.
 */
void
spa_sync_allpools(void)
{
	spa_t *spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		txg_wait_synced(spa_get_dsl(spa), 0);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);
}
/*
 * ==========================================================================
 * Miscellaneous routines
 * ==========================================================================
 */

/*
 * Remove all pools in the system.
 */
void
spa_evict_all(void)
{
	spa_t *spa;

	/*
	 * Remove all cached state.  All pools should be closed now,
	 * so every spa in the AVL tree should be unreferenced.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(NULL)) != NULL) {
		/*
		 * Stop async tasks.  The async thread may need to detach
		 * a device that's been replaced, which requires grabbing
		 * spa_namespace_lock, so we must drop it here.
		 */
		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		spa_async_suspend(spa);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);

		if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
			spa_unload(spa);
			spa_deactivate(spa);
		}
		spa_remove(spa);
	}
	mutex_exit(&spa_namespace_lock);
}

vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
	vdev_t *vd;
	int i;

	if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
		return (vd);

	if (aux) {
		for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
			vd = spa->spa_l2cache.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}

		for (i = 0; i < spa->spa_spares.sav_count; i++) {
			vd = spa->spa_spares.sav_vdevs[i];
			if (vd->vdev_guid == guid)
				return (vd);
		}
	}

	return (NULL);
}

void
spa_upgrade(spa_t *spa, uint64_t version)
{
	ASSERT(spa_writeable(spa));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

	/*
	 * This should only be called for a non-faulted pool, and since a
	 * future version would result in an unopenable pool, this shouldn't be
	 * possible.
	 */
	ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
	ASSERT3U(version, >=, spa->spa_uberblock.ub_version);

	spa->spa_uberblock.ub_version = version;
	vdev_config_dirty(spa->spa_root_vdev);

	spa_config_exit(spa, SCL_ALL, FTAG);

	txg_wait_synced(spa_get_dsl(spa), 0);
}
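
/*
 * Illustrative sketch (editor's addition), modeled on the pool-upgrade
 * ioctl: validate the requested version before letting spa_upgrade()
 * dirty the config and wait for it to sync.
 */
#if 0
static int
example_upgrade(spa_t *spa, uint64_t version)
{
	if (!SPA_VERSION_IS_SUPPORTED(version) ||
	    version < spa_version(spa))
		return (SET_ERROR(EINVAL));
	spa_upgrade(spa, version);
	return (0);
}
#endif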
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
	int i;
	uint64_t spareguid;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++)
		if (sav->sav_vdevs[i]->vdev_guid == guid)
			return (B_TRUE);

	for (i = 0; i < sav->sav_npending; i++) {
		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
		    &spareguid) == 0 && spareguid == guid)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Check if a pool has an active shared spare device.
 * Note: the reference count of an active spare is 2: once as a spare and
 * once as a replacing vdev.
 */
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
	int i, refcnt;
	uint64_t pool;
	spa_aux_vdev_t *sav = &spa->spa_spares;

	for (i = 0; i < sav->sav_count; i++) {
		if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
		    &refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
		    refcnt > 2)
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Post a sysevent corresponding to the given event.  The 'name' must be one of
 * the event definitions in sys/sysevent/eventdefs.h.  The payload will be
 * filled in from the spa and (optionally) the vdev.  This doesn't do anything
 * in the userland libzpool, as we don't want consumers to misinterpret ztest
 * or zdb as real changes.
 */
void
spa_event_notify(spa_t *spa, vdev_t *vd, const char *name)
{
#ifdef _KERNEL
	sysevent_t *ev;
	sysevent_attr_list_t *attr = NULL;
	sysevent_value_t value;
	sysevent_id_t eid;

	ev = sysevent_alloc(EC_ZFS, (char *)name, SUNW_KERN_PUB "zfs",
	    SE_SLEEP);

	value.value_type = SE_DATA_TYPE_STRING;
	value.value.sv_string = spa_name(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_NAME, &value, SE_SLEEP) != 0)
		goto done;

	value.value_type = SE_DATA_TYPE_UINT64;
	value.value.sv_uint64 = spa_guid(spa);
	if (sysevent_add_attr(&attr, ZFS_EV_POOL_GUID, &value, SE_SLEEP) != 0)
		goto done;

	if (vd) {
		value.value_type = SE_DATA_TYPE_UINT64;
		value.value.sv_uint64 = vd->vdev_guid;
		if (sysevent_add_attr(&attr, ZFS_EV_VDEV_GUID, &value,
		    SE_SLEEP) != 0)
			goto done;

		if (vd->vdev_path) {
			value.value_type = SE_DATA_TYPE_STRING;
			value.value.sv_string = vd->vdev_path;
			if (sysevent_add_attr(&attr, ZFS_EV_VDEV_PATH,
			    &value, SE_SLEEP) != 0)
				goto done;
		}
	}

	if (sysevent_attach_attributes(ev, attr) != 0)
		goto done;
	attr = NULL;

	(void) log_sysevent(ev, SE_SLEEP, &eid);

done:
	if (attr)
		sysevent_free_attr(attr);
	sysevent_free(ev);
#endif
}
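
/*
 * Illustrative sketch (editor's addition): posting a vdev-related
 * sysevent.  ESC_ZFS_VDEV_CHECK is believed to be one of the names
 * defined in sys/sysevent/eventdefs.h; substitute the event that
 * matches the actual condition being reported.
 */
#if 0
static void
example_notify(spa_t *spa, vdev_t *vd)
{
	spa_event_notify(spa, vd, ESC_ZFS_VDEV_CHECK);
}
#endif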