vdev_mirror.c revision 326334
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
	vdev_t		*mc_vd;
	uint64_t	mc_offset;
	int		mc_error;
	int		mc_load;
	uint8_t		mc_tried;
	uint8_t		mc_skipped;
	uint8_t		mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
	int		*mm_preferred;
	int		mm_preferred_cnt;
	int		mm_children;
	boolean_t	mm_resilvering;
	boolean_t	mm_root;
	mirror_child_t	mm_child[];
} mirror_map_t;
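
/*
 * Layout note: a mirror_map_t is a single allocation in which the
 * flexible mm_child[] array holds one mirror_child_t per child and the
 * mm_preferred index array is placed directly after it; see
 * vdev_mirror_map_size() and vdev_mirror_map_alloc() below.
 */
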
static int vdev_mirror_shift = 21;

#ifdef _KERNEL
SYSCTL_DECL(_vfs_zfs_vdev);
static SYSCTL_NODE(_vfs_zfs_vdev, OID_AUTO, mirror, CTLFLAG_RD, 0,
    "ZFS VDEV Mirror");
#endif

/*
 * The load configuration settings below are tuned by default for
 * the case where all devices are of the same rotational type.
 *
 * If there is a mixture of rotating and non-rotating media, setting
 * non_rotating_seek_inc to 0 may well provide better results as it
 * will direct more reads to the non-rotating vdevs which are more
 * likely to have higher performance.
 */

/* Rotating media load calculation configuration. */
static int rotating_inc = 0;
#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_inc", &rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_inc, CTLFLAG_RW,
    &rotating_inc, 0, "Rotating media load increment for non-seeking I/O's");
#endif

static int rotating_seek_inc = 5;
#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_inc", &rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_inc, CTLFLAG_RW,
    &rotating_seek_inc, 0, "Rotating media load increment for seeking I/O's");
#endif

static int rotating_seek_offset = 1 * 1024 * 1024;
#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.vdev.mirror.rotating_seek_offset", &rotating_seek_offset);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, rotating_seek_offset, CTLFLAG_RW,
    &rotating_seek_offset, 0, "Offset in bytes from the last I/O which "
    "triggers a reduced rotating media seek increment");
#endif

/* Non-rotating media load calculation configuration. */
static int non_rotating_inc = 0;
#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_inc", &non_rotating_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_inc, CTLFLAG_RW,
    &non_rotating_inc, 0,
    "Non-rotating media load increment for non-seeking I/O's");
#endif

static int non_rotating_seek_inc = 1;
#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.vdev.mirror.non_rotating_seek_inc",
    &non_rotating_seek_inc);
SYSCTL_INT(_vfs_zfs_vdev_mirror, OID_AUTO, non_rotating_seek_inc, CTLFLAG_RW,
    &non_rotating_seek_inc, 0,
    "Non-rotating media load increment for seeking I/O's");
#endif


static inline size_t
vdev_mirror_map_size(int children)
{
	return (offsetof(mirror_map_t, mm_child[children]) +
	    sizeof(int) * children);
}

static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t resilvering, boolean_t root)
{
	mirror_map_t *mm;

	mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
	mm->mm_children = children;
	mm->mm_resilvering = resilvering;
	mm->mm_root = root;
	mm->mm_preferred = (int *)((uintptr_t)mm +
	    offsetof(mirror_map_t, mm_child[children]));

	return (mm);
}

static void
vdev_mirror_map_free(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;

	kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
	vdev_mirror_map_free,
	zio_vsd_default_cksum_report
};

static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
	uint64_t lastoffset;
	int load;

	/* All DVAs have equal weight at the root. */
	if (mm->mm_root)
		return (INT_MAX);

	/*
	 * We don't return INT_MAX if the device is resilvering, i.e.
	 * vdev_resilver_txg != 0, because when tested overall performance
	 * was slightly worse when resilvering with that check than without.
	 */

	/* Standard load based on pending queue length. */
	load = vdev_queue_length(vd);
	lastoffset = vdev_queue_lastoffset(vd);

	if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
		/* Non-rotating media. */
		if (lastoffset == zio_offset)
			return (load + non_rotating_inc);

		/*
		 * Apply a seek penalty even for non-rotating devices as
		 * sequential I/O's can be aggregated into fewer operations
		 * on the device, thus avoiding unnecessary per-command
		 * overhead and boosting performance.
		 */
		return (load + non_rotating_seek_inc);
	}

	/* Rotating media I/O's which directly follow the last I/O. */
	if (lastoffset == zio_offset)
		return (load + rotating_inc);

	/*
	 * Apply half the seek increment to I/O's within seek offset
	 * of the last I/O queued to this vdev as they should incur less
	 * of a seek increment.
	 */
	if (ABS(lastoffset - zio_offset) < rotating_seek_offset)
		return (load + (rotating_seek_inc / 2));

	/* Apply the full seek increment to all other I/O's. */
	return (load + rotating_seek_inc);
}
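
/*
 * Worked example with the default settings above: a rotating child with
 * 4 queued I/O's whose next I/O lands 512KB from its last one scores
 * 4 + (rotating_seek_inc / 2) = 4 + 2 = 6, while a non-rotating child
 * with 6 queued I/O's and a non-sequential offset scores
 * 6 + non_rotating_seek_inc = 7; vdev_mirror_child_select() prefers the
 * lower score, so the rotating child wins despite the seek.
 */
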
static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
	mirror_map_t *mm = NULL;
	mirror_child_t *mc;
	vdev_t *vd = zio->io_vd;
	int c;

	if (vd == NULL) {
		dva_t *dva = zio->io_bp->blk_dva;
		spa_t *spa = zio->io_spa;

		mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
		    B_TRUE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
			mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
		}
	} else {
		/*
		 * If we are resilvering, then we should handle scrub reads
		 * differently; we shouldn't issue them to the resilvering
		 * device because it might not have those blocks.
		 *
		 * We are resilvering iff:
		 * 1) We are a replacing vdev (i.e. our name is
		 *    "replacing-1" or "spare-1" or something like that), and
		 * 2) The pool is currently being resilvered.
		 *
		 * We cannot simply check vd->vdev_resilver_txg, because it's
		 * not set in this path.
		 *
		 * Nor can we just check our vdev_ops; there are cases (such as
		 * when a user types "zpool replace pool odev spare_dev" and
		 * spare_dev is in the spare list, or when a spare device is
		 * automatically used to replace a DEGRADED device) when
		 * resilvering is complete but both the original vdev and the
		 * spare vdev remain in the pool.  That behavior is
		 * intentional.  It helps implement the policy that a spare
		 * should be automatically removed from the pool after the
		 * user replaces the device that originally failed.
		 *
		 * If a spa load is in progress, then spa_dsl_pool may be
		 * uninitialized.  But we shouldn't be resilvering during a
		 * spa load anyway.
		 */
		boolean_t replacing = (vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops) &&
		    spa_load_state(vd->vdev_spa) == SPA_LOAD_NONE &&
		    dsl_scan_resilvering(vd->vdev_spa->spa_dsl_pool);
		mm = vdev_mirror_map_alloc(vd->vdev_children, replacing,
		    B_FALSE);
		for (c = 0; c < mm->mm_children; c++) {
			mc = &mm->mm_child[c];
			mc->mc_vd = vd->vdev_child[c];
			mc->mc_offset = zio->io_offset;
		}
	}

	zio->io_vsd = mm;
	zio->io_vsd_ops = &vdev_mirror_vsd_ops;
	return (mm);
}

static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	int numerrors = 0;
	int lasterror = 0;

	if (vd->vdev_children == 0) {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	vdev_open_children(vd);

	for (int c = 0; c < vd->vdev_children; c++) {
		vdev_t *cvd = vd->vdev_child[c];

		if (cvd->vdev_open_error) {
			lasterror = cvd->vdev_open_error;
			numerrors++;
			continue;
		}

		*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
		*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
		*logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
		*physical_ashift = MAX(*physical_ashift,
		    cvd->vdev_physical_ashift);
	}

	if (numerrors == vd->vdev_children) {
		vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
		return (lasterror);
	}

	return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
	for (int c = 0; c < vd->vdev_children; c++)
		vdev_close(vd->vdev_child[c]);
}
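
/*
 * Completion callbacks for child I/O's.  vdev_mirror_child_done() just
 * records the result of one child I/O; vdev_mirror_scrub_done()
 * additionally copies the data read into each parent's buffer, since
 * scrub reads are issued to every child with a private buffer allocated
 * in vdev_mirror_io_start().
 */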
static void
vdev_mirror_child_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
	mirror_child_t *mc = zio->io_private;

	if (zio->io_error == 0) {
		zio_t *pio;
		zio_link_t *zl = NULL;

		mutex_enter(&zio->io_lock);
		while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
			mutex_enter(&pio->io_lock);
			ASSERT3U(zio->io_size, >=, pio->io_size);
			bcopy(zio->io_data, pio->io_data, pio->io_size);
			mutex_exit(&pio->io_lock);
		}
		mutex_exit(&zio->io_lock);
	}

	zio_buf_free(zio->io_data, zio->io_size);

	mc->mc_error = zio->io_error;
	mc->mc_tried = 1;
	mc->mc_skipped = 0;
}

/*
 * Check the other, lower-index DVAs to see if they're on the same
 * vdev as the child we picked.  If they are, use them since they
 * are likely to have been allocated from the primary metaslab in
 * use at the time, and hence are more likely to have locality with
 * single-copy data.
 */
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
	dva_t *dva = zio->io_bp->blk_dva;
	mirror_map_t *mm = zio->io_vsd;
	int preferred;
	int c;

	preferred = mm->mm_preferred[p];
	for (p--; p >= 0; p--) {
		c = mm->mm_preferred[p];
		if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
			preferred = c;
	}
	return (preferred);
}

static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	int p;

	if (mm->mm_root) {
		p = spa_get_random(mm->mm_preferred_cnt);
		return (vdev_mirror_dva_select(zio, p));
	}

	/*
	 * To ensure we don't always favour the first matching vdev,
	 * which could lead to wear leveling issues on SSD's, we
	 * use the I/O offset as a pseudo random seed into the vdevs
	 * which have the lowest load.
	 */
	p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
	return (mm->mm_preferred[p]);
}
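
/*
 * Illustration: with the default vdev_mirror_shift of 21, the offset
 * above selects among the lowest-load children in 2MB regions.  For a
 * two-way tie, an I/O at offset 0x1500000 (21MB) gives
 * (0x1500000 >> 21) % 2 = 10 % 2 = 0, choosing the first preferred
 * child; an I/O in the next 2MB region would pick the second.
 */
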
/*
 * Try to find a vdev whose DTL doesn't contain the block we want to read,
 * preferring vdevs based on determined load.
 *
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	uint64_t txg = zio->io_txg;
	int c, lowest_load;

	ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

	lowest_load = INT_MAX;
	mm->mm_preferred_cnt = 0;
	for (c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc;

		mc = &mm->mm_child[c];
		if (mc->mc_tried || mc->mc_skipped)
			continue;

		if (!vdev_readable(mc->mc_vd)) {
			mc->mc_error = SET_ERROR(ENXIO);
			mc->mc_tried = 1;	/* don't even try */
			mc->mc_skipped = 1;
			continue;
		}

		if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
			mc->mc_error = SET_ERROR(ESTALE);
			mc->mc_skipped = 1;
			mc->mc_speculative = 1;
			continue;
		}

		mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
		if (mc->mc_load > lowest_load)
			continue;

		if (mc->mc_load < lowest_load) {
			lowest_load = mc->mc_load;
			mm->mm_preferred_cnt = 0;
		}
		mm->mm_preferred[mm->mm_preferred_cnt] = c;
		mm->mm_preferred_cnt++;
	}

	if (mm->mm_preferred_cnt == 1) {
		vdev_queue_register_lastoffset(
		    mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
		return (mm->mm_preferred[0]);
	}

	if (mm->mm_preferred_cnt > 1) {
		int c = vdev_mirror_preferred_child_randomize(zio);

		vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd, zio);
		return (c);
	}

	/*
	 * Every device is either missing or has this txg in its DTL.
	 * Look for any child we haven't already tried before giving up.
	 */
	for (c = 0; c < mm->mm_children; c++) {
		if (!mm->mm_child[c].mc_tried) {
			vdev_queue_register_lastoffset(mm->mm_child[c].mc_vd,
			    zio);
			return (c);
		}
	}

	/*
	 * Every child failed.  There's no place left to look.
	 */
	return (-1);
}
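
/*
 * I/O dispatch: normal reads go to the single child chosen by
 * vdev_mirror_child_select() above; scrub reads (when not resilvering)
 * fan out to all children with private buffers; writes and frees are
 * issued to all children.
 */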
static void
vdev_mirror_io_start(zio_t *zio)
{
	mirror_map_t *mm;
	mirror_child_t *mc;
	int c, children;

	mm = vdev_mirror_map_init(zio);

	if (zio->io_type == ZIO_TYPE_READ) {
		if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_resilvering &&
		    mm->mm_children > 1) {
			/*
			 * For scrubbing reads we need to allocate a read
			 * buffer for each child and issue reads to all
			 * children.  If any child succeeds, it will copy its
			 * data into zio->io_data in vdev_mirror_scrub_done.
			 */
			for (c = 0; c < mm->mm_children; c++) {
				mc = &mm->mm_child[c];
				zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
				    mc->mc_vd, mc->mc_offset,
				    zio_buf_alloc(zio->io_size), zio->io_size,
				    zio->io_type, zio->io_priority, 0,
				    vdev_mirror_scrub_done, mc));
			}
			zio_execute(zio);
			return;
		}
		/*
		 * For normal reads just pick one child.
		 */
		c = vdev_mirror_child_select(zio);
		children = (c >= 0);
	} else {
		ASSERT(zio->io_type == ZIO_TYPE_WRITE ||
		    zio->io_type == ZIO_TYPE_FREE);

		/*
		 * Writes and frees go to all children.
		 */
		c = 0;
		children = mm->mm_children;
	}

	while (children--) {
		mc = &mm->mm_child[c];
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    zio->io_type, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		c++;
	}

	zio_execute(zio);
}

static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
	int error[2] = { 0, 0 };

	for (int c = 0; c < mm->mm_children; c++) {
		mirror_child_t *mc = &mm->mm_child[c];
		int s = mc->mc_speculative;
		error[s] = zio_worst_error(error[s], mc->mc_error);
	}

	return (error[0] ? error[0] : error[1]);
}
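
/*
 * Note on error aggregation: children skipped because their DTL
 * suggested they might not have the block are marked speculative, and
 * vdev_mirror_worst_error() above only reports their errors when no
 * non-speculative error was seen.
 */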
static void
vdev_mirror_io_done(zio_t *zio)
{
	mirror_map_t *mm = zio->io_vsd;
	mirror_child_t *mc;
	int c;
	int good_copies = 0;
	int unexpected_errors = 0;

	for (c = 0; c < mm->mm_children; c++) {
		mc = &mm->mm_child[c];

		if (mc->mc_error) {
			if (!mc->mc_skipped)
				unexpected_errors++;
		} else if (mc->mc_tried) {
			good_copies++;
		}
	}

	if (zio->io_type == ZIO_TYPE_WRITE) {
		/*
		 * XXX -- for now, treat partial writes as success.
		 *
		 * Now that we support write reallocation, it would be better
		 * to treat partial failure as real failure unless there are
		 * no non-degraded top-level vdevs left, and not update DTLs
		 * if we intend to reallocate.
		 */
		/* XXPOLICY */
		if (good_copies != mm->mm_children) {
			/*
			 * Always require at least one good copy.
			 *
			 * For ditto blocks (io_vd == NULL), require
			 * all copies to be good.
			 *
			 * XXX -- for replacing vdevs, there's no great answer.
			 * If the old device is really dead, we may not even
			 * be able to access it -- so we only want to
			 * require good writes to the new device.  But if
			 * the new device turns out to be flaky, we want
			 * to be able to detach it -- which requires all
			 * writes to the old device to have succeeded.
			 */
			if (good_copies == 0 || zio->io_vd == NULL)
				zio->io_error = vdev_mirror_worst_error(mm);
		}
		return;
	} else if (zio->io_type == ZIO_TYPE_FREE) {
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/*
	 * If we don't have a good copy yet, keep trying other children.
	 */
	/* XXPOLICY */
	if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
		ASSERT(c >= 0 && c < mm->mm_children);
		mc = &mm->mm_child[c];
		zio_vdev_io_redone(zio);
		zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
		    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
		    ZIO_TYPE_READ, zio->io_priority, 0,
		    vdev_mirror_child_done, mc));
		return;
	}

	/* XXPOLICY */
	if (good_copies == 0) {
		zio->io_error = vdev_mirror_worst_error(mm);
		ASSERT(zio->io_error != 0);
	}

	if (good_copies && spa_writeable(zio->io_spa) &&
	    (unexpected_errors ||
	    (zio->io_flags & ZIO_FLAG_RESILVER) ||
	    ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_resilvering))) {
		/*
		 * Use the good data we have in hand to repair damaged
		 * children.
		 */
		for (c = 0; c < mm->mm_children; c++) {
			/*
			 * Don't rewrite known good children.
			 * Not only is it unnecessary, it could
			 * actually be harmful: if the system lost
			 * power while rewriting the only good copy,
			 * there would be no good copies left!
			 */
			mc = &mm->mm_child[c];

			if (mc->mc_error == 0) {
				if (mc->mc_tried)
					continue;
				if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
				    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
				    zio->io_txg, 1))
					continue;
				mc->mc_error = SET_ERROR(ESTALE);
			}

			zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
			    mc->mc_vd, mc->mc_offset,
			    zio->io_data, zio->io_size,
			    ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
			    ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
		}
	}
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
	if (faulted == vd->vdev_children)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
		    VDEV_AUX_NO_REPLICAS);
	else if (degraded + faulted != 0)
		vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
	else
		vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_mirror_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_MIRROR,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_REPLACING,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
	vdev_mirror_open,
	vdev_mirror_close,
	vdev_default_asize,
	vdev_mirror_io_start,
	vdev_mirror_io_done,
	vdev_mirror_state_change,
	NULL,
	NULL,
	VDEV_TYPE_SPARE,	/* name of this vdev type */
	B_FALSE			/* not a leaf vdev */
};