/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/nvpair.h>
#include <sys/uio.h>
#include <sys/fs/zfs.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/utsname.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/kobj.h>
#include <sys/zone.h>
#endif

/*
 * Pool configuration repository.
 *
 * Pool configuration is stored as a packed nvlist on the filesystem. By
 * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
 * (when the ZFS module is loaded). Pools can also have the 'cachefile'
 * property set, which allows them to be stored in an alternate location
 * under the control of external software.
 *
 * For each cache file, we have a single nvlist which holds all the
 * configuration information. When the module loads, we read this information
 * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
 * maintained independently in spa.c. Whenever the namespace is modified, or
 * the configuration of a pool is changed, we call spa_config_sync(), which
 * walks through all the active pools and writes the configuration to disk.
 */

static uint64_t spa_config_generation = 1;

/*
 * This can be overridden in userland to preserve an alternate namespace for
 * userland pools when doing testing.
 */
const char *spa_config_path = ZPOOL_CACHE;

/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace. It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	void *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair;
	char *pathname;
	struct _buf *file;
	uint64_t fsize;

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);

	file = kobj_open_file(pathname);

	kmem_free(pathname, MAXPATHLEN);

	if (file == (struct _buf *)-1)
		return;

	if (kobj_get_filesize(file, &fsize) != 0)
		goto out;

	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (kobj_read_file(file, buf, fsize, 0) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	nvpair = NULL;
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		VERIFY(nvpair_value_nvlist(nvpair, &child) == 0);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		(void) spa_add(nvpair_name(nvpair), child, NULL);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);

out:
	if (buf != NULL)
		kmem_free(buf, fsize);

	kobj_close_file(file);
}
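
/*
 * Recursively strip per-vdev runtime state (vdev and scan statistics) from a
 * config nvlist, walking both the child array and the vdev tree, so that only
 * persistent configuration is written to the cache file.
 */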
static void
spa_config_clean(nvlist_t *nvl)
{
	nvlist_t **child;
	nvlist_t *nvroot = NULL;
	uint_t c, children;

	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
	    &children) == 0) {
		for (c = 0; c < children; c++)
			spa_config_clean(child[c]);
	}

	if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0)
		spa_config_clean(nvroot);

	nvlist_remove(nvl, ZPOOL_CONFIG_VDEV_STATS, DATA_TYPE_UINT64_ARRAY);
	nvlist_remove(nvl, ZPOOL_CONFIG_SCAN_STATS, DATA_TYPE_UINT64_ARRAY);
}
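
/*
 * Write the packed configuration 'nvl' to the cache file described by 'dp'.
 * A NULL 'nvl' means the pool no longer belongs in this cache file, so the
 * file is removed instead.
 */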
static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
	size_t buflen;
	char *buf;
	vnode_t *vp;
	int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX;
	char *temp;
	int err;

	/*
	 * If the nvlist is empty (NULL), then remove the old cachefile.
	 */
	if (nvl == NULL) {
		err = vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
		return (err);
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	VERIFY(nvlist_size(nvl, &buflen, NV_ENCODE_XDR) == 0);

	buf = kmem_alloc(buflen, KM_SLEEP);
	temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	VERIFY(nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);

	/*
	 * Write the configuration to disk. We need to do the traditional
	 * 'write to temporary file, sync, move over original' to make sure we
	 * always have a consistent view of the data.
	 */
	(void) snprintf(temp, MAXPATHLEN, "%s.tmp", dp->scd_path);

	err = vn_open(temp, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0);
	if (err == 0) {
		err = vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE,
		    0, RLIM64_INFINITY, kcred, NULL);
		if (err == 0)
			err = VOP_FSYNC(vp, FSYNC, kcred, NULL);
		if (err == 0)
			err = vn_rename(temp, dp->scd_path, UIO_SYSSPACE);
		(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
	}

	(void) vn_remove(temp, UIO_SYSSPACE, RMFILE);

	kmem_free(buf, buflen);
	kmem_free(temp, MAXPATHLEN);
	return (err);
}

/*
 * Synchronize pool configuration to disk. This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	boolean_t ccw_failure;
	int error;
	char *pool_name;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present. When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			nvlist_t *nvroot = NULL;
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    (spa_state(spa) == POOL_STATE_ACTIVE &&
			    !spa_writeable(spa)))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
				pool_name = fnvlist_lookup_string(
				    spa->spa_config, ZPOOL_CONFIG_POOL_NAME);
			} else {
				pool_name = spa_name(spa);
			}

			fnvlist_add_nvlist(nvl, pool_name,
			    spa->spa_config);
			mutex_exit(&spa->spa_props_lock);

			if (nvlist_lookup_nvlist(nvl, pool_name, &nvroot) == 0)
				spa_config_clean(nvroot);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, 0, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC);
}

/*
 * Sigh. Inside a local zone, we don't have access to /etc/zfs/zpool.cache,
 * and we don't want to allow the local zone to see all the pools anyway.
 * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
 * information for all pools visible within the zone.
 */
nvlist_t *
spa_all_configs(uint64_t *generation)
{
	nvlist_t *pools;
	spa_t *spa = NULL;

	if (*generation == spa_config_generation)
		return (NULL);

	VERIFY(nvlist_alloc(&pools, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (INGLOBALZONE(curthread) ||
		    zone_dataset_visible(spa_name(spa), NULL)) {
			mutex_enter(&spa->spa_props_lock);
			VERIFY(nvlist_add_nvlist(pools, spa_name(spa),
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);
		}
	}
	*generation = spa_config_generation;
	mutex_exit(&spa_namespace_lock);

	return (pools);
}
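
/*
 * Replace the pool's in-core configuration with 'config', freeing the
 * previously cached nvlist. The spa takes ownership of the new nvlist.
 */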
void
spa_config_set(spa_t *spa, nvlist_t *config)
{
	mutex_enter(&spa->spa_props_lock);
	nvlist_free(spa->spa_config);
	spa->spa_config = config;
	mutex_exit(&spa->spa_props_lock);
}

/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	char *pool_name;

	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name. In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable. This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary. When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		pool_name = fnvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME);
	} else {
		pool_name = spa_name(spa);
	}

	config = fnvlist_alloc();

	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, pool_name);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, spa_state(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	if (spa->spa_comment != NULL) {
		fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
		    spa->spa_comment);
	}

#ifdef _KERNEL
	hostid = zone_get_hostid(NULL);
#else	/* _KERNEL */
	/*
	 * We're emulating the system's hostid in userland, so we can't use
	 * zone_get_hostid().
	 */
	(void) ddi_strtoul(hw_serial, NULL, 10, &hostid);
#endif	/* _KERNEL */
	if (hostid != 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
		    hostid) == 0);
	}
	VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
	    utsname.nodename) == 0);

	if (vd != rvd) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid) == 0);
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid) == 0);
		if (vd->vdev_isspare)
			VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_IS_SPARE,
			    1ULL) == 0);
		if (vd->vdev_islog)
			VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_IS_LOG,
			    1ULL) == 0);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels.
		 */
		if (spa->spa_config_splitting != NULL)
			VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting) == 0);
	}

	/*
	 * Add the top-level config. We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID,
		    split_guid) == 0);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, 0);
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot) == 0);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features) == 0);

	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_stat_t *dds;
		ddt_object_t *ddo;

		ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
		ddt_get_dedup_histogram(spa, ddh);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_HISTOGRAM,
		    (uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t)) == 0);
		kmem_free(ddh, sizeof (ddt_histogram_t));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t)) == 0);
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		VERIFY(nvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t)) == 0);
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}

/*
 * Update all disk labels, generate a fresh config based on the current
 * in-core state, and sync the global config cache (do not sync the config
 * cache if this is a booting rootpool).
 */
void
spa_config_update(spa_t *spa, int what)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t txg;
	int c;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	txg = spa_last_synced_txg(spa) + 1;
	if (what == SPA_CONFIG_UPDATE_POOL) {
		vdev_config_dirty(rvd);
	} else {
		/*
		 * If we have top-level vdevs that were added but have
		 * not yet been prepared for allocation, do that now.
		 * (It's safe now because the config cache is up to date,
		 * so it will be able to translate the new DVAs.)
		 * See comments in spa_vdev_add() for full details.
		 */
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];
			if (tvd->vdev_ms_array == 0) {
				vdev_ashift_optimize(tvd);
				vdev_metaslab_set_size(tvd);
			}
			vdev_expand(tvd, txg);
		}
	}
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Wait for the mosconfig to be regenerated and synced.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	/*
	 * Update the global config cache to reflect the new mosconfig.
	 */
	spa_config_sync(spa, B_FALSE, what != SPA_CONFIG_UPDATE_POOL);

	if (what == SPA_CONFIG_UPDATE_POOL)
		spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}