vdev_removal.c revision 339106
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool. Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz. (Note that members of a mirror can also be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 *  - Disable allocations from this device (spa_vdev_remove_top).
 *
 *  - From a new thread (spa_vdev_remove_thread), copy data from
 *    the removing vdev to a different vdev. The copy happens in open
 *    context (spa_vdev_copy_impl) and issues a sync task
 *    (vdev_mapping_sync) so the sync thread can update the partial
 *    indirect mappings in core and on disk.
 *
 *  - If a free happens during a removal, it is freed from the
 *    removing vdev, and if it has already been copied, from the new
 *    location as well (free_from_removing_vdev).
 *
 *  - After the removal is completed, the copy thread converts the vdev
 *    into an indirect vdev (vdev_remove_complete) before instructing
 *    the sync thread to destroy the space maps and finish the removal
 *    (spa_finish_removal).
 */

typedef struct vdev_copy_arg {
	metaslab_t	*vca_msp;
	uint64_t	vca_outstanding_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;

/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal. This determines how much i/o we can have
 * in flight concurrently.
 */
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;

/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 *
 * Note: we will issue I/Os of up to this size. The mpt driver does not
 * respond well to I/Os larger than 1MB, so we set this to 1MB. (When
 * mpt processes an I/O larger than 1MB, it needs to do an allocation of
 * 2 physically contiguous pages; if this allocation fails, mpt will drop
 * the I/O and hang the device.)
 */
int zfs_remove_max_segment = 1024 * 1024;

/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
uint64_t zfs_remove_max_bytes_pause = UINT64_MAX;

#define	VDEV_REMOVAL_ZAP_OBJS	"lzap"

static void spa_vdev_remove_thread(void *arg);

static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
	VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys, tx));
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid =
		    fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

		if (guid == target_guid)
			return (nvpp[i]);
	}

	return (NULL);
}

static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}

static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
	svr->svr_allocd_segs = range_tree_create(NULL, NULL);
	svr->svr_vdev_id = vd->vdev_id;

	for (int i = 0; i < TXG_SIZE; i++) {
		svr->svr_frees[i] = range_tree_create(NULL, NULL);
		list_create(&svr->svr_new_segments[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	return (svr);
}

void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
		ASSERT0(svr->svr_max_offset_to_sync[i]);
		range_tree_destroy(svr->svr_frees[i]);
		list_destroy(&svr->svr_new_segments[i]);
	}

	range_tree_destroy(svr->svr_allocd_segs);
	mutex_destroy(&svr->svr_lock);
	cv_destroy(&svr->svr_cv);
	kmem_free(svr, sizeof (*svr));
}

/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 * - initializing the spa_removing_phys which tracks this removal
 * - computing the amount of space to remove for accounting purposes
 * - dirtying all dbufs in the spa_config_object
 * - creating the spa_vdev_removal
 * - starting the spa_vdev_remove_thread
 */
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
	spa_vdev_removal_t *svr = NULL;
	uint64_t txg = dmu_tx_get_txg(tx);

	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
	svr = spa_vdev_removal_create(vd);

	ASSERT(vd->vdev_removing);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		/*
		 * By activating the OBSOLETE_COUNTS feature, we prevent
		 * the pool from being downgraded and ensure that the
		 * refcounts are precise.
		 */
		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		uint64_t one = 1;
		VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
		    &one, tx));
		ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
	}

	vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
	vd->vdev_indirect_mapping =
	    vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
	vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
	vd->vdev_indirect_births =
	    vdev_indirect_births_open(mos, vic->vic_births_object);
	spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
	spa->spa_removing_phys.sr_start_time = gethrestime_sec();
	spa->spa_removing_phys.sr_end_time = 0;
	spa->spa_removing_phys.sr_state = DSS_SCANNING;
	spa->spa_removing_phys.sr_to_copy = 0;
	spa->spa_removing_phys.sr_copied = 0;

	/*
	 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
	 * there may be space in the defer tree, which is free, but still
	 * counted in vs_alloc.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *ms = vd->vdev_ms[i];
		if (ms->ms_sm == NULL)
			continue;

		/*
		 * Sync tasks happen before metaslab_sync(), therefore
		 * smp_alloc and sm_alloc must be the same.
		 */
		ASSERT3U(space_map_allocated(ms->ms_sm), ==,
		    ms->ms_sm->sm_phys->smp_alloc);

		spa->spa_removing_phys.sr_to_copy +=
		    space_map_allocated(ms->ms_sm);

		/*
		 * Space which we are freeing this txg does not need to
		 * be copied.
		 */
		spa->spa_removing_phys.sr_to_copy -=
		    range_tree_space(ms->ms_freeing);

		ASSERT0(range_tree_space(ms->ms_freed));
		for (int t = 0; t < TXG_SIZE; t++)
			ASSERT0(range_tree_space(ms->ms_allocating[t]));
	}

	/*
	 * Sync tasks are called before metaslab_sync(), so there should
	 * be no already-synced metaslabs in the TXG_CLEAN list.
	 */
	ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);

	spa_sync_removing_state(spa, tx);

	/*
	 * All blocks that we need to read the most recent mapping must be
	 * stored on concrete vdevs. Therefore, we must dirty anything that
	 * is read before spa_remove_init(). Specifically, the
	 * spa_config_object. (Note that although we already modified the
	 * spa_config_object in spa_sync_removing_state, that may not have
	 * modified all blocks of the object.)
	 */
	dmu_object_info_t doi;
	VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
	for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
		dmu_buf_t *dbuf;
		VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
		    offset, FTAG, &dbuf, 0));
		dmu_buf_will_dirty(dbuf, tx);
		offset += dbuf->db_size;
		dmu_buf_rele(dbuf, FTAG);
	}

	/*
	 * Now that we've allocated the im_object, dirty the vdev to ensure
	 * that the object gets written to the config on disk.
	 */
	vdev_config_dirty(vd);

	zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
	    "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
	    vic->vic_mapping_object);

	spa_history_log_internal(spa, "vdev remove started", tx,
	    "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");
	/*
	 * Setting spa_vdev_removal causes subsequent frees to call
	 * free_from_removing_vdev(). Note that we don't need any locking
	 * because we are the sync thread, and metaslab_free_impl() is only
	 * called from syncing context (potentially from a zio taskq thread,
	 * but in any case only when there are outstanding free i/os, which
	 * there are not).
	 */
	ASSERT3P(spa->spa_vdev_removal, ==, NULL);
	spa->spa_vdev_removal = svr;
	svr->svr_thread = thread_create(NULL, 0,
	    spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}

/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed. We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
int
spa_remove_init(spa_t *spa)
{
	int error;

	error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys);

	if (error == ENOENT) {
		spa->spa_removing_phys.sr_state = DSS_NONE;
		spa->spa_removing_phys.sr_removing_vdev = -1;
		spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
		spa->spa_indirect_vdevs_loaded = B_TRUE;
		return (0);
	} else if (error != 0) {
		return (error);
	}

	if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
		/*
		 * We are currently removing a vdev. Create and
		 * initialize a spa_vdev_removal_t from the bonus
		 * buffer of the removing vdevs vdev_im_object, and
		 * initialize its partial mapping.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		vdev_t *vd = vdev_lookup_top(spa,
		    spa->spa_removing_phys.sr_removing_vdev);

		if (vd == NULL) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			return (EINVAL);
		}

		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT(vdev_is_concrete(vd));
		spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
		ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
		ASSERT(vd->vdev_removing);

		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);
		spa_config_exit(spa, SCL_STATE, FTAG);

		spa->spa_vdev_removal = svr;
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != UINT64_MAX) {
		vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);

		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * Now that we've loaded all the indirect mappings, we can allow
	 * reads from other blocks (e.g. via predictive prefetch).
	 */
	spa->spa_indirect_vdevs_loaded = B_TRUE;
	return (0);
}

void
spa_restart_removal(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	/*
	 * In general when this function is called there is no
	 * removal thread running. The only scenario where this
	 * is not true is during spa_import() where this function
	 * is called twice [once from spa_import_impl() and
	 * spa_async_resume()]. Thus, in the scenario where we
	 * import a pool that has an ongoing removal we don't
	 * want to spawn a second thread.
	 */
	if (svr->svr_thread != NULL)
		return;

	if (!spa_writeable(spa))
		return;

	zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
	svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
	    0, &p0, TS_RUN, minclsyspri);
}

/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t txg = spa_syncing_txg(spa);
	uint64_t max_offset_yet = 0;

	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vim));
	ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);

	mutex_enter(&svr->svr_lock);

	/*
	 * Remove the segment from the removing vdev's spacemap.
This 480 * ensures that we will not attempt to copy this space (if the 481 * removal thread has not yet visited it), and also ensures 482 * that we know what is actually allocated on the new vdevs 483 * (needed if we cancel the removal). 484 * 485 * Note: we must do the metaslab_free_concrete() with the svr_lock 486 * held, so that the remove_thread can not load this metaslab and then 487 * visit this offset between the time that we metaslab_free_concrete() 488 * and when we check to see if it has been visited. 489 * 490 * Note: The checkpoint flag is set to false as having/taking 491 * a checkpoint and removing a device can't happen at the same 492 * time. 493 */ 494 ASSERT(!spa_has_checkpoint(spa)); 495 metaslab_free_concrete(vd, offset, size, B_FALSE); 496 497 uint64_t synced_size = 0; 498 uint64_t synced_offset = 0; 499 uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim); 500 if (offset < max_offset_synced) { 501 /* 502 * The mapping for this offset is already on disk. 503 * Free from the new location. 504 * 505 * Note that we use svr_max_synced_offset because it is 506 * updated atomically with respect to the in-core mapping. 507 * By contrast, vim_max_offset is not. 508 * 509 * This block may be split between a synced entry and an 510 * in-flight or unvisited entry. Only process the synced 511 * portion of it here. 512 */ 513 synced_size = MIN(size, max_offset_synced - offset); 514 synced_offset = offset; 515 516 ASSERT3U(max_offset_yet, <=, max_offset_synced); 517 max_offset_yet = max_offset_synced; 518 519 DTRACE_PROBE3(remove__free__synced, 520 spa_t *, spa, 521 uint64_t, offset, 522 uint64_t, synced_size); 523 524 size -= synced_size; 525 offset += synced_size; 526 } 527 528 /* 529 * Look at all in-flight txgs starting from the currently syncing one 530 * and see if a section of this free is being copied. By starting from 531 * this txg and iterating forward, we might find that this region 532 * was copied in two different txgs and handle it appropriately. 533 */ 534 for (int i = 0; i < TXG_CONCURRENT_STATES; i++) { 535 int txgoff = (txg + i) & TXG_MASK; 536 if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) { 537 /* 538 * The mapping for this offset is in flight, and 539 * will be synced in txg+i. 540 */ 541 uint64_t inflight_size = MIN(size, 542 svr->svr_max_offset_to_sync[txgoff] - offset); 543 544 DTRACE_PROBE4(remove__free__inflight, 545 spa_t *, spa, 546 uint64_t, offset, 547 uint64_t, inflight_size, 548 uint64_t, txg + i); 549 550 /* 551 * We copy data in order of increasing offset. 552 * Therefore the max_offset_to_sync[] must increase 553 * (or be zero, indicating that nothing is being 554 * copied in that txg). 555 */ 556 if (svr->svr_max_offset_to_sync[txgoff] != 0) { 557 ASSERT3U(svr->svr_max_offset_to_sync[txgoff], 558 >=, max_offset_yet); 559 max_offset_yet = 560 svr->svr_max_offset_to_sync[txgoff]; 561 } 562 563 /* 564 * We've already committed to copying this segment: 565 * we have allocated space elsewhere in the pool for 566 * it and have an IO outstanding to copy the data. We 567 * cannot free the space before the copy has 568 * completed, or else the copy IO might overwrite any 569 * new data. To free that space, we record the 570 * segment in the appropriate svr_frees tree and free 571 * the mapped space later, in the txg where we have 572 * completed the copy and synced the mapping (see 573 * vdev_mapping_sync). 
574 */ 575 range_tree_add(svr->svr_frees[txgoff], 576 offset, inflight_size); 577 size -= inflight_size; 578 offset += inflight_size; 579 580 /* 581 * This space is already accounted for as being 582 * done, because it is being copied in txg+i. 583 * However, if i!=0, then it is being copied in 584 * a future txg. If we crash after this txg 585 * syncs but before txg+i syncs, then the space 586 * will be free. Therefore we must account 587 * for the space being done in *this* txg 588 * (when it is freed) rather than the future txg 589 * (when it will be copied). 590 */ 591 ASSERT3U(svr->svr_bytes_done[txgoff], >=, 592 inflight_size); 593 svr->svr_bytes_done[txgoff] -= inflight_size; 594 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size; 595 } 596 } 597 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]); 598 599 if (size > 0) { 600 /* 601 * The copy thread has not yet visited this offset. Ensure 602 * that it doesn't. 603 */ 604 605 DTRACE_PROBE3(remove__free__unvisited, 606 spa_t *, spa, 607 uint64_t, offset, 608 uint64_t, size); 609 610 if (svr->svr_allocd_segs != NULL) 611 range_tree_clear(svr->svr_allocd_segs, offset, size); 612 613 /* 614 * Since we now do not need to copy this data, for 615 * accounting purposes we have done our job and can count 616 * it as completed. 617 */ 618 svr->svr_bytes_done[txg & TXG_MASK] += size; 619 } 620 mutex_exit(&svr->svr_lock); 621 622 /* 623 * Now that we have dropped svr_lock, process the synced portion 624 * of this free. 625 */ 626 if (synced_size > 0) { 627 vdev_indirect_mark_obsolete(vd, synced_offset, synced_size); 628 629 /* 630 * Note: this can only be called from syncing context, 631 * and the vdev_indirect_mapping is only changed from the 632 * sync thread, so we don't need svr_lock while doing 633 * metaslab_free_impl_cb. 634 */ 635 boolean_t checkpoint = B_FALSE; 636 vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size, 637 metaslab_free_impl_cb, &checkpoint); 638 } 639} 640 641/* 642 * Stop an active removal and update the spa_removing phys. 643 */ 644static void 645spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx) 646{ 647 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 648 ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa)); 649 650 /* Ensure the removal thread has completed before we free the svr. 
*/ 651 spa_vdev_remove_suspend(spa); 652 653 ASSERT(state == DSS_FINISHED || state == DSS_CANCELED); 654 655 if (state == DSS_FINISHED) { 656 spa_removing_phys_t *srp = &spa->spa_removing_phys; 657 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 658 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 659 660 if (srp->sr_prev_indirect_vdev != UINT64_MAX) { 661 vdev_t *pvd = vdev_lookup_top(spa, 662 srp->sr_prev_indirect_vdev); 663 ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops); 664 } 665 666 vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev; 667 srp->sr_prev_indirect_vdev = vd->vdev_id; 668 } 669 spa->spa_removing_phys.sr_state = state; 670 spa->spa_removing_phys.sr_end_time = gethrestime_sec(); 671 672 spa->spa_vdev_removal = NULL; 673 spa_vdev_removal_destroy(svr); 674 675 spa_sync_removing_state(spa, tx); 676 677 vdev_config_dirty(spa->spa_root_vdev); 678} 679 680static void 681free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size) 682{ 683 vdev_t *vd = arg; 684 vdev_indirect_mark_obsolete(vd, offset, size); 685 boolean_t checkpoint = B_FALSE; 686 vdev_indirect_ops.vdev_op_remap(vd, offset, size, 687 metaslab_free_impl_cb, &checkpoint); 688} 689 690/* 691 * On behalf of the removal thread, syncs an incremental bit more of 692 * the indirect mapping to disk and updates the in-memory mapping. 693 * Called as a sync task in every txg that the removal thread makes progress. 694 */ 695static void 696vdev_mapping_sync(void *arg, dmu_tx_t *tx) 697{ 698 spa_vdev_removal_t *svr = arg; 699 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 700 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 701 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 702 uint64_t txg = dmu_tx_get_txg(tx); 703 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 704 705 ASSERT(vic->vic_mapping_object != 0); 706 ASSERT3U(txg, ==, spa_syncing_txg(spa)); 707 708 vdev_indirect_mapping_add_entries(vim, 709 &svr->svr_new_segments[txg & TXG_MASK], tx); 710 vdev_indirect_births_add_entry(vd->vdev_indirect_births, 711 vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx); 712 713 /* 714 * Free the copied data for anything that was freed while the 715 * mapping entries were in flight. 716 */ 717 mutex_enter(&svr->svr_lock); 718 range_tree_vacate(svr->svr_frees[txg & TXG_MASK], 719 free_mapped_segment_cb, vd); 720 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=, 721 vdev_indirect_mapping_max_offset(vim)); 722 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0; 723 mutex_exit(&svr->svr_lock); 724 725 spa_sync_removing_state(spa, tx); 726} 727 728/* 729 * All reads and writes associated with a call to spa_vdev_copy_segment() 730 * are done. 731 */ 732static void 733spa_vdev_copy_nullzio_done(zio_t *zio) 734{ 735 spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa); 736} 737 738/* 739 * The write of the new location is done. 740 */ 741static void 742spa_vdev_copy_segment_write_done(zio_t *zio) 743{ 744 vdev_copy_arg_t *vca = zio->io_private; 745 746 abd_free(zio->io_abd); 747 748 mutex_enter(&vca->vca_lock); 749 vca->vca_outstanding_bytes -= zio->io_size; 750 cv_signal(&vca->vca_cv); 751 mutex_exit(&vca->vca_lock); 752} 753 754/* 755 * The read of the old location is done. The parent zio is the write to 756 * the new location. Allow it to start. 
757 */ 758static void 759spa_vdev_copy_segment_read_done(zio_t *zio) 760{ 761 zio_nowait(zio_unique_parent(zio)); 762} 763 764/* 765 * If the old and new vdevs are mirrors, we will read both sides of the old 766 * mirror, and write each copy to the corresponding side of the new mirror. 767 * If the old and new vdevs have a different number of children, we will do 768 * this as best as possible. Since we aren't verifying checksums, this 769 * ensures that as long as there's a good copy of the data, we'll have a 770 * good copy after the removal, even if there's silent damage to one side 771 * of the mirror. If we're removing a mirror that has some silent damage, 772 * we'll have exactly the same damage in the new location (assuming that 773 * the new location is also a mirror). 774 * 775 * We accomplish this by creating a tree of zio_t's, with as many writes as 776 * there are "children" of the new vdev (a non-redundant vdev counts as one 777 * child, a 2-way mirror has 2 children, etc). Each write has an associated 778 * read from a child of the old vdev. Typically there will be the same 779 * number of children of the old and new vdevs. However, if there are more 780 * children of the new vdev, some child(ren) of the old vdev will be issued 781 * multiple reads. If there are more children of the old vdev, some copies 782 * will be dropped. 783 * 784 * For example, the tree of zio_t's for a 2-way mirror is: 785 * 786 * null 787 * / \ 788 * write(new vdev, child 0) write(new vdev, child 1) 789 * | | 790 * read(old vdev, child 0) read(old vdev, child 1) 791 * 792 * Child zio's complete before their parents complete. However, zio's 793 * created with zio_vdev_child_io() may be issued before their children 794 * complete. In this case we need to make sure that the children (reads) 795 * complete before the parents (writes) are *issued*. We do this by not 796 * calling zio_nowait() on each write until its corresponding read has 797 * completed. 798 * 799 * The spa_config_lock must be held while zio's created by 800 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does 801 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null" 802 * zio is needed to release the spa_config_lock after all the reads and 803 * writes complete. (Note that we can't grab the config lock for each read, 804 * because it is not reentrant - we could deadlock with a thread waiting 805 * for a write lock.) 806 */ 807static void 808spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio, 809 vdev_t *source_vd, uint64_t source_offset, 810 vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size) 811{ 812 ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0); 813 814 mutex_enter(&vca->vca_lock); 815 vca->vca_outstanding_bytes += size; 816 mutex_exit(&vca->vca_lock); 817 818 abd_t *abd = abd_alloc_for_io(size, B_FALSE); 819 820 vdev_t *source_child_vd; 821 if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) { 822 /* 823 * Source and dest are both mirrors. Copy from the same 824 * child id as we are copying to (wrapping around if there 825 * are more dest children than source children). 
826 */ 827 source_child_vd = 828 source_vd->vdev_child[dest_id % source_vd->vdev_children]; 829 } else { 830 source_child_vd = source_vd; 831 } 832 833 zio_t *write_zio = zio_vdev_child_io(nzio, NULL, 834 dest_child_vd, dest_offset, abd, size, 835 ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL, 836 ZIO_FLAG_CANFAIL, 837 spa_vdev_copy_segment_write_done, vca); 838 839 zio_nowait(zio_vdev_child_io(write_zio, NULL, 840 source_child_vd, source_offset, abd, size, 841 ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL, 842 ZIO_FLAG_CANFAIL, 843 spa_vdev_copy_segment_read_done, vca)); 844} 845 846/* 847 * Allocate a new location for this segment, and create the zio_t's to 848 * read from the old location and write to the new location. 849 */ 850static int 851spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg, 852 vdev_copy_arg_t *vca, zio_alloc_list_t *zal) 853{ 854 metaslab_group_t *mg = vd->vdev_mg; 855 spa_t *spa = vd->vdev_spa; 856 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 857 vdev_indirect_mapping_entry_t *entry; 858 dva_t dst = { 0 }; 859 860 ASSERT3U(size, <=, SPA_MAXBLOCKSIZE); 861 862 /* 863 * We use allocator 0 for this I/O because we don't expect device remap 864 * to be the steady state of the system, so parallelizing is not as 865 * critical as it is for other allocation types. We also want to ensure 866 * that the IOs are allocated together as much as possible, to reduce 867 * mapping sizes. 868 */ 869 int error = metaslab_alloc_dva(spa, mg->mg_class, size, 870 &dst, 0, NULL, txg, 0, zal, 0); 871 if (error != 0) 872 return (error); 873 874 /* 875 * We can't have any padding of the allocated size, otherwise we will 876 * misunderstand what's allocated, and the size of the mapping. 877 * The caller ensures this will be true by passing in a size that is 878 * aligned to the worst (highest) ashift in the pool. 879 */ 880 ASSERT3U(DVA_GET_ASIZE(&dst), ==, size); 881 882 entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP); 883 DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start); 884 entry->vime_mapping.vimep_dst = dst; 885 886 /* 887 * See comment before spa_vdev_copy_one_child(). 888 */ 889 spa_config_enter(spa, SCL_STATE, spa, RW_READER); 890 zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL, 891 spa_vdev_copy_nullzio_done, NULL, 0); 892 vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst)); 893 if (dest_vd->vdev_ops == &vdev_mirror_ops) { 894 for (int i = 0; i < dest_vd->vdev_children; i++) { 895 vdev_t *child = dest_vd->vdev_child[i]; 896 spa_vdev_copy_one_child(vca, nzio, vd, start, 897 child, DVA_GET_OFFSET(&dst), i, size); 898 } 899 } else { 900 spa_vdev_copy_one_child(vca, nzio, vd, start, 901 dest_vd, DVA_GET_OFFSET(&dst), -1, size); 902 } 903 zio_nowait(nzio); 904 905 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry); 906 ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift); 907 vdev_dirty(vd, 0, NULL, txg); 908 909 return (0); 910} 911 912/* 913 * Complete the removal of a toplevel vdev. This is called as a 914 * synctask in the same txg that we will sync out the new config (to the 915 * MOS object) which indicates that this vdev is indirect. 
916 */ 917static void 918vdev_remove_complete_sync(void *arg, dmu_tx_t *tx) 919{ 920 spa_vdev_removal_t *svr = arg; 921 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 922 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 923 924 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops); 925 926 for (int i = 0; i < TXG_SIZE; i++) { 927 ASSERT0(svr->svr_bytes_done[i]); 928 } 929 930 ASSERT3U(spa->spa_removing_phys.sr_copied, ==, 931 spa->spa_removing_phys.sr_to_copy); 932 933 vdev_destroy_spacemaps(vd, tx); 934 935 /* destroy leaf zaps, if any */ 936 ASSERT3P(svr->svr_zaplist, !=, NULL); 937 for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL); 938 pair != NULL; 939 pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) { 940 vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx); 941 } 942 fnvlist_free(svr->svr_zaplist); 943 944 spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx); 945 /* vd->vdev_path is not available here */ 946 spa_history_log_internal(spa, "vdev remove completed", tx, 947 "%s vdev %llu", spa_name(spa), vd->vdev_id); 948} 949 950static void 951vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist) 952{ 953 ASSERT3P(zlist, !=, NULL); 954 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops); 955 956 if (vd->vdev_leaf_zap != 0) { 957 char zkey[32]; 958 (void) snprintf(zkey, sizeof (zkey), "%s-%ju", 959 VDEV_REMOVAL_ZAP_OBJS, (uintmax_t)vd->vdev_leaf_zap); 960 fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap); 961 } 962 963 for (uint64_t id = 0; id < vd->vdev_children; id++) { 964 vdev_remove_enlist_zaps(vd->vdev_child[id], zlist); 965 } 966} 967 968static void 969vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg) 970{ 971 vdev_t *ivd; 972 dmu_tx_t *tx; 973 spa_t *spa = vd->vdev_spa; 974 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 975 976 /* 977 * First, build a list of leaf zaps to be destroyed. 978 * This is passed to the sync context thread, 979 * which does the actual unlinking. 980 */ 981 svr->svr_zaplist = fnvlist_alloc(); 982 vdev_remove_enlist_zaps(vd, svr->svr_zaplist); 983 984 ivd = vdev_add_parent(vd, &vdev_indirect_ops); 985 ivd->vdev_removing = 0; 986 987 vd->vdev_leaf_zap = 0; 988 989 vdev_remove_child(ivd, vd); 990 vdev_compact_children(ivd); 991 992 ASSERT(!list_link_active(&vd->vdev_state_dirty_node)); 993 994 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg); 995 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr, 996 0, ZFS_SPACE_CHECK_NONE, tx); 997 dmu_tx_commit(tx); 998 999 /* 1000 * Indicate that this thread has exited. 1001 * After this, we can not use svr. 1002 */ 1003 mutex_enter(&svr->svr_lock); 1004 svr->svr_thread = NULL; 1005 cv_broadcast(&svr->svr_cv); 1006 mutex_exit(&svr->svr_lock); 1007} 1008 1009/* 1010 * Complete the removal of a toplevel vdev. This is called in open 1011 * context by the removal thread after we have copied all vdev's data. 1012 */ 1013static void 1014vdev_remove_complete(spa_t *spa) 1015{ 1016 uint64_t txg; 1017 1018 /* 1019 * Wait for any deferred frees to be synced before we call 1020 * vdev_metaslab_fini() 1021 */ 1022 txg_wait_synced(spa->spa_dsl_pool, 0); 1023 txg = spa_vdev_enter(spa); 1024 vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id); 1025 1026 sysevent_t *ev = spa_event_create(spa, vd, NULL, 1027 ESC_ZFS_VDEV_REMOVE_DEV); 1028 1029 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu", 1030 vd->vdev_id, txg); 1031 1032 /* 1033 * Discard allocation state. 
1034 */ 1035 if (vd->vdev_mg != NULL) { 1036 vdev_metaslab_fini(vd); 1037 metaslab_group_destroy(vd->vdev_mg); 1038 vd->vdev_mg = NULL; 1039 } 1040 ASSERT0(vd->vdev_stat.vs_space); 1041 ASSERT0(vd->vdev_stat.vs_dspace); 1042 1043 vdev_remove_replace_with_indirect(vd, txg); 1044 1045 /* 1046 * We now release the locks, allowing spa_sync to run and finish the 1047 * removal via vdev_remove_complete_sync in syncing context. 1048 * 1049 * Note that we hold on to the vdev_t that has been replaced. Since 1050 * it isn't part of the vdev tree any longer, it can't be concurrently 1051 * manipulated, even while we don't have the config lock. 1052 */ 1053 (void) spa_vdev_exit(spa, NULL, txg, 0); 1054 1055 /* 1056 * Top ZAP should have been transferred to the indirect vdev in 1057 * vdev_remove_replace_with_indirect. 1058 */ 1059 ASSERT0(vd->vdev_top_zap); 1060 1061 /* 1062 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect. 1063 */ 1064 ASSERT0(vd->vdev_leaf_zap); 1065 1066 txg = spa_vdev_enter(spa); 1067 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1068 /* 1069 * Request to update the config and the config cachefile. 1070 */ 1071 vdev_config_dirty(spa->spa_root_vdev); 1072 (void) spa_vdev_exit(spa, vd, txg, 0); 1073 1074 spa_event_post(ev); 1075} 1076 1077/* 1078 * Evacuates a segment of size at most max_alloc from the vdev 1079 * via repeated calls to spa_vdev_copy_segment. If an allocation 1080 * fails, the pool is probably too fragmented to handle such a 1081 * large size, so decrease max_alloc so that the caller will not try 1082 * this size again this txg. 1083 */ 1084static void 1085spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca, 1086 uint64_t *max_alloc, dmu_tx_t *tx) 1087{ 1088 uint64_t txg = dmu_tx_get_txg(tx); 1089 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1090 1091 mutex_enter(&svr->svr_lock); 1092 1093 range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root); 1094 if (rs == NULL) { 1095 mutex_exit(&svr->svr_lock); 1096 return; 1097 } 1098 uint64_t offset = rs->rs_start; 1099 uint64_t length = MIN(rs->rs_end - rs->rs_start, *max_alloc); 1100 1101 range_tree_remove(svr->svr_allocd_segs, offset, length); 1102 1103 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) { 1104 dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync, 1105 svr, 0, ZFS_SPACE_CHECK_NONE, tx); 1106 } 1107 1108 svr->svr_max_offset_to_sync[txg & TXG_MASK] = offset + length; 1109 1110 /* 1111 * Note: this is the amount of *allocated* space 1112 * that we are taking care of each txg. 1113 */ 1114 svr->svr_bytes_done[txg & TXG_MASK] += length; 1115 1116 mutex_exit(&svr->svr_lock); 1117 1118 zio_alloc_list_t zal; 1119 metaslab_trace_init(&zal); 1120 uint64_t thismax = *max_alloc; 1121 while (length > 0) { 1122 uint64_t mylen = MIN(length, thismax); 1123 1124 int error = spa_vdev_copy_segment(vd, 1125 offset, mylen, txg, vca, &zal); 1126 1127 if (error == ENOSPC) { 1128 /* 1129 * Cut our segment in half, and don't try this 1130 * segment size again this txg. Note that the 1131 * allocation size must be aligned to the highest 1132 * ashift in the pool, so that the allocation will 1133 * not be padded out to a multiple of the ashift, 1134 * which could cause us to think that this mapping 1135 * is larger than we intended. 
1136 */ 1137 ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT); 1138 ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift); 1139 thismax = P2ROUNDUP(mylen / 2, 1140 1 << spa->spa_max_ashift); 1141 ASSERT3U(thismax, <, mylen); 1142 /* 1143 * The minimum-size allocation can not fail. 1144 */ 1145 ASSERT3U(mylen, >, 1 << spa->spa_max_ashift); 1146 *max_alloc = mylen - (1 << spa->spa_max_ashift); 1147 } else { 1148 ASSERT0(error); 1149 length -= mylen; 1150 offset += mylen; 1151 1152 /* 1153 * We've performed an allocation, so reset the 1154 * alloc trace list. 1155 */ 1156 metaslab_trace_fini(&zal); 1157 metaslab_trace_init(&zal); 1158 } 1159 } 1160 metaslab_trace_fini(&zal); 1161} 1162 1163/* 1164 * The removal thread operates in open context. It iterates over all 1165 * allocated space in the vdev, by loading each metaslab's spacemap. 1166 * For each contiguous segment of allocated space (capping the segment 1167 * size at SPA_MAXBLOCKSIZE), we: 1168 * - Allocate space for it on another vdev. 1169 * - Create a new mapping from the old location to the new location 1170 * (as a record in svr_new_segments). 1171 * - Initiate a logical read zio to get the data off the removing disk. 1172 * - In the read zio's done callback, initiate a logical write zio to 1173 * write it to the new vdev. 1174 * Note that all of this will take effect when a particular TXG syncs. 1175 * The sync thread ensures that all the phys reads and writes for the syncing 1176 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk 1177 * (see vdev_mapping_sync()). 1178 */ 1179static void 1180spa_vdev_remove_thread(void *arg) 1181{ 1182 spa_t *spa = arg; 1183 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1184 vdev_copy_arg_t vca; 1185 uint64_t max_alloc = zfs_remove_max_segment; 1186 uint64_t last_txg = 0; 1187 1188 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 1189 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1190 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1191 uint64_t start_offset = vdev_indirect_mapping_max_offset(vim); 1192 1193 ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops); 1194 ASSERT(vdev_is_concrete(vd)); 1195 ASSERT(vd->vdev_removing); 1196 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0); 1197 ASSERT(vim != NULL); 1198 1199 mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL); 1200 cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL); 1201 vca.vca_outstanding_bytes = 0; 1202 1203 mutex_enter(&svr->svr_lock); 1204 1205 /* 1206 * Start from vim_max_offset so we pick up where we left off 1207 * if we are restarting the removal after opening the pool. 1208 */ 1209 uint64_t msi; 1210 for (msi = start_offset >> vd->vdev_ms_shift; 1211 msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) { 1212 metaslab_t *msp = vd->vdev_ms[msi]; 1213 ASSERT3U(msi, <=, vd->vdev_ms_count); 1214 1215 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1216 1217 mutex_enter(&msp->ms_sync_lock); 1218 mutex_enter(&msp->ms_lock); 1219 1220 /* 1221 * Assert nothing in flight -- ms_*tree is empty. 1222 */ 1223 for (int i = 0; i < TXG_SIZE; i++) { 1224 ASSERT0(range_tree_space(msp->ms_allocating[i])); 1225 } 1226 1227 /* 1228 * If the metaslab has ever been allocated from (ms_sm!=NULL), 1229 * read the allocated segments from the space map object 1230 * into svr_allocd_segs. 
Since we do this while holding 1231 * svr_lock and ms_sync_lock, concurrent frees (which 1232 * would have modified the space map) will wait for us 1233 * to finish loading the spacemap, and then take the 1234 * appropriate action (see free_from_removing_vdev()). 1235 */ 1236 if (msp->ms_sm != NULL) { 1237 space_map_t *sm = NULL; 1238 1239 /* 1240 * We have to open a new space map here, because 1241 * ms_sm's sm_length and sm_alloc may not reflect 1242 * what's in the object contents, if we are in between 1243 * metaslab_sync() and metaslab_sync_done(). 1244 */ 1245 VERIFY0(space_map_open(&sm, 1246 spa->spa_dsl_pool->dp_meta_objset, 1247 msp->ms_sm->sm_object, msp->ms_sm->sm_start, 1248 msp->ms_sm->sm_size, msp->ms_sm->sm_shift)); 1249 space_map_update(sm); 1250 VERIFY0(space_map_load(sm, svr->svr_allocd_segs, 1251 SM_ALLOC)); 1252 space_map_close(sm); 1253 1254 range_tree_walk(msp->ms_freeing, 1255 range_tree_remove, svr->svr_allocd_segs); 1256 1257 /* 1258 * When we are resuming from a paused removal (i.e. 1259 * when importing a pool with a removal in progress), 1260 * discard any state that we have already processed. 1261 */ 1262 range_tree_clear(svr->svr_allocd_segs, 0, start_offset); 1263 } 1264 mutex_exit(&msp->ms_lock); 1265 mutex_exit(&msp->ms_sync_lock); 1266 1267 vca.vca_msp = msp; 1268 zfs_dbgmsg("copying %llu segments for metaslab %llu", 1269 avl_numnodes(&svr->svr_allocd_segs->rt_root), 1270 msp->ms_id); 1271 1272 while (!svr->svr_thread_exit && 1273 !range_tree_is_empty(svr->svr_allocd_segs)) { 1274 1275 mutex_exit(&svr->svr_lock); 1276 1277 /* 1278 * We need to periodically drop the config lock so that 1279 * writers can get in. Additionally, we can't wait 1280 * for a txg to sync while holding a config lock 1281 * (since a waiting writer could cause a 3-way deadlock 1282 * with the sync thread, which also gets a config 1283 * lock for reader). So we can't hold the config lock 1284 * while calling dmu_tx_assign(). 1285 */ 1286 spa_config_exit(spa, SCL_CONFIG, FTAG); 1287 1288 /* 1289 * This delay will pause the removal around the point 1290 * specified by zfs_remove_max_bytes_pause. We do this 1291 * solely from the test suite or during debugging. 1292 */ 1293 uint64_t bytes_copied = 1294 spa->spa_removing_phys.sr_copied; 1295 for (int i = 0; i < TXG_SIZE; i++) 1296 bytes_copied += svr->svr_bytes_done[i]; 1297 while (zfs_remove_max_bytes_pause <= bytes_copied && 1298 !svr->svr_thread_exit) 1299 delay(hz); 1300 1301 mutex_enter(&vca.vca_lock); 1302 while (vca.vca_outstanding_bytes > 1303 zfs_remove_max_copy_bytes) { 1304 cv_wait(&vca.vca_cv, &vca.vca_lock); 1305 } 1306 mutex_exit(&vca.vca_lock); 1307 1308 dmu_tx_t *tx = 1309 dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); 1310 1311 VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); 1312 uint64_t txg = dmu_tx_get_txg(tx); 1313 1314 /* 1315 * Reacquire the vdev_config lock. The vdev_t 1316 * that we're removing may have changed, e.g. due 1317 * to a vdev_attach or vdev_detach. 1318 */ 1319 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER); 1320 vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1321 1322 if (txg != last_txg) 1323 max_alloc = zfs_remove_max_segment; 1324 last_txg = txg; 1325 1326 spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx); 1327 1328 dmu_tx_commit(tx); 1329 mutex_enter(&svr->svr_lock); 1330 } 1331 } 1332 1333 mutex_exit(&svr->svr_lock); 1334 1335 spa_config_exit(spa, SCL_CONFIG, FTAG); 1336 1337 /* 1338 * Wait for all copies to finish before cleaning up the vca. 
1339 */ 1340 txg_wait_synced(spa->spa_dsl_pool, 0); 1341 ASSERT0(vca.vca_outstanding_bytes); 1342 1343 mutex_destroy(&vca.vca_lock); 1344 cv_destroy(&vca.vca_cv); 1345 1346 if (svr->svr_thread_exit) { 1347 mutex_enter(&svr->svr_lock); 1348 range_tree_vacate(svr->svr_allocd_segs, NULL, NULL); 1349 svr->svr_thread = NULL; 1350 cv_broadcast(&svr->svr_cv); 1351 mutex_exit(&svr->svr_lock); 1352 } else { 1353 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1354 vdev_remove_complete(spa); 1355 } 1356 thread_exit(); 1357} 1358 1359void 1360spa_vdev_remove_suspend(spa_t *spa) 1361{ 1362 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1363 1364 if (svr == NULL) 1365 return; 1366 1367 mutex_enter(&svr->svr_lock); 1368 svr->svr_thread_exit = B_TRUE; 1369 while (svr->svr_thread != NULL) 1370 cv_wait(&svr->svr_cv, &svr->svr_lock); 1371 svr->svr_thread_exit = B_FALSE; 1372 mutex_exit(&svr->svr_lock); 1373} 1374 1375/* ARGSUSED */ 1376static int 1377spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx) 1378{ 1379 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1380 1381 if (spa->spa_vdev_removal == NULL) 1382 return (ESRCH); 1383 return (0); 1384} 1385 1386/* 1387 * Cancel a removal by freeing all entries from the partial mapping 1388 * and marking the vdev as no longer being removing. 1389 */ 1390/* ARGSUSED */ 1391static void 1392spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx) 1393{ 1394 spa_t *spa = dmu_tx_pool(tx)->dp_spa; 1395 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1396 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id); 1397 vdev_indirect_config_t *vic = &vd->vdev_indirect_config; 1398 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping; 1399 objset_t *mos = spa->spa_meta_objset; 1400 1401 ASSERT3P(svr->svr_thread, ==, NULL); 1402 1403 spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx); 1404 if (vdev_obsolete_counts_are_precise(vd)) { 1405 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1406 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1407 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx)); 1408 } 1409 1410 if (vdev_obsolete_sm_object(vd) != 0) { 1411 ASSERT(vd->vdev_obsolete_sm != NULL); 1412 ASSERT3U(vdev_obsolete_sm_object(vd), ==, 1413 space_map_object(vd->vdev_obsolete_sm)); 1414 1415 space_map_free(vd->vdev_obsolete_sm, tx); 1416 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap, 1417 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx)); 1418 space_map_close(vd->vdev_obsolete_sm); 1419 vd->vdev_obsolete_sm = NULL; 1420 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx); 1421 } 1422 for (int i = 0; i < TXG_SIZE; i++) { 1423 ASSERT(list_is_empty(&svr->svr_new_segments[i])); 1424 ASSERT3U(svr->svr_max_offset_to_sync[i], <=, 1425 vdev_indirect_mapping_max_offset(vim)); 1426 } 1427 1428 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) { 1429 metaslab_t *msp = vd->vdev_ms[msi]; 1430 1431 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim)) 1432 break; 1433 1434 ASSERT0(range_tree_space(svr->svr_allocd_segs)); 1435 1436 mutex_enter(&msp->ms_lock); 1437 1438 /* 1439 * Assert nothing in flight -- ms_*tree is empty. 1440 */ 1441 for (int i = 0; i < TXG_SIZE; i++) 1442 ASSERT0(range_tree_space(msp->ms_allocating[i])); 1443 for (int i = 0; i < TXG_DEFER_SIZE; i++) 1444 ASSERT0(range_tree_space(msp->ms_defer[i])); 1445 ASSERT0(range_tree_space(msp->ms_freed)); 1446 1447 if (msp->ms_sm != NULL) { 1448 /* 1449 * Assert that the in-core spacemap has the same 1450 * length as the on-disk one, so we can use the 1451 * existing in-core spacemap to load it from disk. 
1452 */ 1453 ASSERT3U(msp->ms_sm->sm_alloc, ==, 1454 msp->ms_sm->sm_phys->smp_alloc); 1455 ASSERT3U(msp->ms_sm->sm_length, ==, 1456 msp->ms_sm->sm_phys->smp_objsize); 1457 1458 mutex_enter(&svr->svr_lock); 1459 VERIFY0(space_map_load(msp->ms_sm, 1460 svr->svr_allocd_segs, SM_ALLOC)); 1461 range_tree_walk(msp->ms_freeing, 1462 range_tree_remove, svr->svr_allocd_segs); 1463 1464 /* 1465 * Clear everything past what has been synced, 1466 * because we have not allocated mappings for it yet. 1467 */ 1468 uint64_t syncd = vdev_indirect_mapping_max_offset(vim); 1469 uint64_t sm_end = msp->ms_sm->sm_start + 1470 msp->ms_sm->sm_size; 1471 if (sm_end > syncd) 1472 range_tree_clear(svr->svr_allocd_segs, 1473 syncd, sm_end - syncd); 1474 1475 mutex_exit(&svr->svr_lock); 1476 } 1477 mutex_exit(&msp->ms_lock); 1478 1479 mutex_enter(&svr->svr_lock); 1480 range_tree_vacate(svr->svr_allocd_segs, 1481 free_mapped_segment_cb, vd); 1482 mutex_exit(&svr->svr_lock); 1483 } 1484 1485 /* 1486 * Note: this must happen after we invoke free_mapped_segment_cb, 1487 * because it adds to the obsolete_segments. 1488 */ 1489 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL); 1490 1491 ASSERT3U(vic->vic_mapping_object, ==, 1492 vdev_indirect_mapping_object(vd->vdev_indirect_mapping)); 1493 vdev_indirect_mapping_close(vd->vdev_indirect_mapping); 1494 vd->vdev_indirect_mapping = NULL; 1495 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx); 1496 vic->vic_mapping_object = 0; 1497 1498 ASSERT3U(vic->vic_births_object, ==, 1499 vdev_indirect_births_object(vd->vdev_indirect_births)); 1500 vdev_indirect_births_close(vd->vdev_indirect_births); 1501 vd->vdev_indirect_births = NULL; 1502 vdev_indirect_births_free(mos, vic->vic_births_object, tx); 1503 vic->vic_births_object = 0; 1504 1505 /* 1506 * We may have processed some frees from the removing vdev in this 1507 * txg, thus increasing svr_bytes_done; discard that here to 1508 * satisfy the assertions in spa_vdev_removal_destroy(). 1509 * Note that future txg's can not have any bytes_done, because 1510 * future TXG's are only modified from open context, and we have 1511 * already shut down the copying thread. 1512 */ 1513 svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0; 1514 spa_finish_removal(spa, DSS_CANCELED, tx); 1515 1516 vd->vdev_removing = B_FALSE; 1517 vdev_config_dirty(vd); 1518 1519 zfs_dbgmsg("canceled device removal for vdev %llu in %llu", 1520 vd->vdev_id, dmu_tx_get_txg(tx)); 1521 spa_history_log_internal(spa, "vdev remove canceled", tx, 1522 "%s vdev %llu %s", spa_name(spa), 1523 vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1524} 1525 1526int 1527spa_vdev_remove_cancel(spa_t *spa) 1528{ 1529 spa_vdev_remove_suspend(spa); 1530 1531 if (spa->spa_vdev_removal == NULL) 1532 return (ESRCH); 1533 1534 uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id; 1535 1536 int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check, 1537 spa_vdev_remove_cancel_sync, NULL, 0, 1538 ZFS_SPACE_CHECK_EXTRA_RESERVED); 1539 1540 if (error == 0) { 1541 spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER); 1542 vdev_t *vd = vdev_lookup_top(spa, vdid); 1543 metaslab_group_activate(vd->vdev_mg); 1544 spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG); 1545 } 1546 1547 return (error); 1548} 1549 1550/* 1551 * Called every sync pass of every txg if there's a svr. 
1552 */ 1553void 1554svr_sync(spa_t *spa, dmu_tx_t *tx) 1555{ 1556 spa_vdev_removal_t *svr = spa->spa_vdev_removal; 1557 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 1558 1559 /* 1560 * This check is necessary so that we do not dirty the 1561 * DIRECTORY_OBJECT via spa_sync_removing_state() when there 1562 * is nothing to do. Dirtying it every time would prevent us 1563 * from syncing-to-convergence. 1564 */ 1565 if (svr->svr_bytes_done[txgoff] == 0) 1566 return; 1567 1568 /* 1569 * Update progress accounting. 1570 */ 1571 spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff]; 1572 svr->svr_bytes_done[txgoff] = 0; 1573 1574 spa_sync_removing_state(spa, tx); 1575} 1576 1577static void 1578vdev_remove_make_hole_and_free(vdev_t *vd) 1579{ 1580 uint64_t id = vd->vdev_id; 1581 spa_t *spa = vd->vdev_spa; 1582 vdev_t *rvd = spa->spa_root_vdev; 1583 boolean_t last_vdev = (id == (rvd->vdev_children - 1)); 1584 1585 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1586 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1587 1588 vdev_free(vd); 1589 1590 if (last_vdev) { 1591 vdev_compact_children(rvd); 1592 } else { 1593 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops); 1594 vdev_add_child(rvd, vd); 1595 } 1596 vdev_config_dirty(rvd); 1597 1598 /* 1599 * Reassess the health of our root vdev. 1600 */ 1601 vdev_reopen(rvd); 1602} 1603 1604/* 1605 * Remove a log device. The config lock is held for the specified TXG. 1606 */ 1607static int 1608spa_vdev_remove_log(vdev_t *vd, uint64_t *txg) 1609{ 1610 metaslab_group_t *mg = vd->vdev_mg; 1611 spa_t *spa = vd->vdev_spa; 1612 int error = 0; 1613 1614 ASSERT(vd->vdev_islog); 1615 ASSERT(vd == vd->vdev_top); 1616 1617 /* 1618 * Stop allocating from this vdev. 1619 */ 1620 metaslab_group_passivate(mg); 1621 1622 /* 1623 * Wait for the youngest allocations and frees to sync, 1624 * and then wait for the deferral of those frees to finish. 1625 */ 1626 spa_vdev_config_exit(spa, NULL, 1627 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG); 1628 1629 /* 1630 * Evacuate the device. We don't hold the config lock as writer 1631 * since we need to do I/O but we do keep the 1632 * spa_namespace_lock held. Once this completes the device 1633 * should no longer have any blocks allocated on it. 1634 */ 1635 if (vd->vdev_islog) { 1636 if (vd->vdev_stat.vs_alloc != 0) 1637 error = spa_reset_logs(spa); 1638 } 1639 1640 *txg = spa_vdev_config_enter(spa); 1641 1642 if (error != 0) { 1643 metaslab_group_activate(mg); 1644 return (error); 1645 } 1646 ASSERT0(vd->vdev_stat.vs_alloc); 1647 1648 /* 1649 * The evacuation succeeded. Remove any remaining MOS metadata 1650 * associated with this vdev, and wait for these changes to sync. 1651 */ 1652 vd->vdev_removing = B_TRUE; 1653 1654 vdev_dirty_leaves(vd, VDD_DTL, *txg); 1655 vdev_config_dirty(vd); 1656 1657 spa_history_log_internal(spa, "vdev remove", NULL, 1658 "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id, 1659 (vd->vdev_path != NULL) ? vd->vdev_path : "-"); 1660 1661 /* Make sure these changes are sync'ed */ 1662 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG); 1663 1664 *txg = spa_vdev_config_enter(spa); 1665 1666 sysevent_t *ev = spa_event_create(spa, vd, NULL, 1667 ESC_ZFS_VDEV_REMOVE_DEV); 1668 ASSERT(MUTEX_HELD(&spa_namespace_lock)); 1669 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL); 1670 1671 /* The top ZAP should have been destroyed by vdev_remove_empty. */ 1672 ASSERT0(vd->vdev_top_zap); 1673 /* The leaf ZAP should have been destroyed by vdev_dtl_sync. 
*/ 1674 ASSERT0(vd->vdev_leaf_zap); 1675 1676 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE); 1677 1678 if (list_link_active(&vd->vdev_state_dirty_node)) 1679 vdev_state_clean(vd); 1680 if (list_link_active(&vd->vdev_config_dirty_node)) 1681 vdev_config_clean(vd); 1682 1683 /* 1684 * Clean up the vdev namespace. 1685 */ 1686 vdev_remove_make_hole_and_free(vd); 1687 1688 if (ev != NULL) 1689 spa_event_post(ev); 1690 1691 return (0); 1692} 1693 1694static int 1695spa_vdev_remove_top_check(vdev_t *vd) 1696{ 1697 spa_t *spa = vd->vdev_spa; 1698 1699 if (vd != vd->vdev_top) 1700 return (SET_ERROR(ENOTSUP)); 1701 1702 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL)) 1703 return (SET_ERROR(ENOTSUP)); 1704 1705 /* 1706 * There has to be enough free space to remove the 1707 * device and leave double the "slop" space (i.e. we 1708 * must leave at least 3% of the pool free, in addition to 1709 * the normal slop space). 1710 */ 1711 if (dsl_dir_space_available(spa->spa_dsl_pool->dp_root_dir, 1712 NULL, 0, B_TRUE) < 1713 vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) { 1714 return (SET_ERROR(ENOSPC)); 1715 } 1716 1717 /* 1718 * There can not be a removal in progress. 1719 */ 1720 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) 1721 return (SET_ERROR(EBUSY)); 1722 1723 /* 1724 * The device must have all its data. 1725 */ 1726 if (!vdev_dtl_empty(vd, DTL_MISSING) || 1727 !vdev_dtl_empty(vd, DTL_OUTAGE)) 1728 return (SET_ERROR(EBUSY)); 1729 1730 /* 1731 * The device must be healthy. 1732 */ 1733 if (!vdev_readable(vd)) 1734 return (SET_ERROR(EIO)); 1735 1736 /* 1737 * All vdevs in normal class must have the same ashift. 1738 */ 1739 if (spa->spa_max_ashift != spa->spa_min_ashift) { 1740 return (SET_ERROR(EINVAL)); 1741 } 1742 1743 /* 1744 * All vdevs in normal class must have the same ashift 1745 * and not be raidz. 1746 */ 1747 vdev_t *rvd = spa->spa_root_vdev; 1748 int num_indirect = 0; 1749 for (uint64_t id = 0; id < rvd->vdev_children; id++) { 1750 vdev_t *cvd = rvd->vdev_child[id]; 1751 if (cvd->vdev_ashift != 0 && !cvd->vdev_islog) 1752 ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift); 1753 if (cvd->vdev_ops == &vdev_indirect_ops) 1754 num_indirect++; 1755 if (!vdev_is_concrete(cvd)) 1756 continue; 1757 if (cvd->vdev_ops == &vdev_raidz_ops) 1758 return (SET_ERROR(EINVAL)); 1759 /* 1760 * Need the mirror to be mirror of leaf vdevs only 1761 */ 1762 if (cvd->vdev_ops == &vdev_mirror_ops) { 1763 for (uint64_t cid = 0; 1764 cid < cvd->vdev_children; cid++) { 1765 vdev_t *tmp = cvd->vdev_child[cid]; 1766 if (!tmp->vdev_ops->vdev_op_leaf) 1767 return (SET_ERROR(EINVAL)); 1768 } 1769 } 1770 } 1771 1772 return (0); 1773} 1774 1775/* 1776 * Initiate removal of a top-level vdev, reducing the total space in the pool. 1777 * The config lock is held for the specified TXG. Once initiated, 1778 * evacuation of all allocated space (copying it to other vdevs) happens 1779 * in the background (see spa_vdev_remove_thread()), and can be canceled 1780 * (see spa_vdev_remove_cancel()). If successful, the vdev will 1781 * be transformed to an indirect vdev (see spa_vdev_remove_complete()). 1782 */ 1783static int 1784spa_vdev_remove_top(vdev_t *vd, uint64_t *txg) 1785{ 1786 spa_t *spa = vd->vdev_spa; 1787 int error; 1788 1789 /* 1790 * Check for errors up-front, so that we don't waste time 1791 * passivating the metaslab group and clearing the ZIL if there 1792 * are errors. 
	 */
	error = spa_vdev_remove_top_check(vd);
	if (error != 0)
		return (error);

	/*
	 * Stop allocating from this vdev.  Note that we must check
	 * that this is not the only device in the pool before
	 * passivating, otherwise we will not be able to make
	 * progress because we can't allocate from any vdevs.
	 * The above check for sufficient free space serves this
	 * purpose.
	 */
	metaslab_group_t *mg = vd->vdev_mg;
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * We must ensure that no "stubby" log blocks are allocated
	 * on the device to be removed.  These blocks could be
	 * written at any time, including while we are in the middle
	 * of copying them.
	 */
	error = spa_reset_logs(spa);

	*txg = spa_vdev_config_enter(spa);

	/*
	 * Things might have changed while the config lock was dropped
	 * (e.g. space usage).  Check for errors again.
	 */
	if (error == 0)
		error = spa_vdev_remove_top_check(vd);

	if (error != 0) {
		metaslab_group_activate(mg);
		return (error);
	}

	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool,
	    vdev_remove_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	return (0);
}

/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time.  As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock.  During each step the configuration is synced out.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache, *nv;
	uint64_t txg = 0;
	uint_t nspares, nl2cache;
	int error = 0;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
	sysevent_t *ev = NULL;

	ASSERT(spa_writeable(spa));

	if (!locked)
		txg = spa_vdev_enter(spa);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;

		if (!locked)
			return (spa_vdev_exit(spa, NULL, txg, error));

		return (error);
	}

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			char *nvstr = fnvlist_lookup_string(nv,
			    ZPOOL_CONFIG_PATH);
			spa_history_log_internal(spa, "vdev remove", NULL,
			    "%s vdev (%s) %s", spa_name(spa),
			    VDEV_TYPE_SPARE, nvstr);
			if (vd == NULL)
				vd = spa_lookup_by_guid(spa, guid, B_TRUE);
			ev = spa_event_create(spa, vd, NULL,
			    ESC_ZFS_VDEV_REMOVE_AUX);
			spa_vdev_remove_aux(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
			spa_load_spares(spa);
			spa->spa_spares.sav_sync = B_TRUE;
		} else {
			error = SET_ERROR(EBUSY);
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		spa_history_log_internal(spa, "vdev remove", NULL,
		    "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
		/*
		 * Cache devices can always be removed.
		 */
		vd = spa_lookup_by_guid(spa, guid, B_TRUE);
		ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL && vd->vdev_islog) {
		ASSERT(!locked);
		error = spa_vdev_remove_log(vd, &txg);
	} else if (vd != NULL) {
		ASSERT(!locked);
		error = spa_vdev_remove_top(vd, &txg);
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
		 */
		error = SET_ERROR(ENOENT);
	}

	if (!locked)
		error = spa_vdev_exit(spa, NULL, txg, error);

	if (ev != NULL) {
		if (error != 0) {
			spa_event_discard(ev);
		} else {
			spa_event_post(ev);
		}
	}

	return (error);
}

int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
	prs->prs_state = spa->spa_removing_phys.sr_state;

	if (prs->prs_state == DSS_NONE)
		return (SET_ERROR(ENOENT));

	prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
	prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
	prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
	prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
	prs->prs_copied = spa->spa_removing_phys.sr_copied;

	if (spa->spa_vdev_removal != NULL) {
		for (int i = 0; i < TXG_SIZE; i++) {
			prs->prs_copied +=
			    spa->spa_vdev_removal->svr_bytes_done[i];
		}
	}

	prs->prs_mapping_memory = 0;
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != -1) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
		vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}

	return (0);
}
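
/*
 * Illustrative sketch (not part of this file): a consumer of
 * spa_removal_get_stats() can derive overall removal progress from the
 * byte counts filled in above, along the lines of:
 *
 *	pool_removal_stat_t prs;
 *	if (spa_removal_get_stats(spa, &prs) == 0 &&
 *	    prs.prs_state == DSS_SCANNING && prs.prs_to_copy != 0) {
 *		uint64_t pct = prs.prs_copied * 100 / prs.prs_to_copy;
 *		...
 *	}
 *
 * prs_mapping_memory similarly reports the in-core size of the indirect
 * mappings left behind by removals that have already completed.
 */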