vdev_mirror.c revision 260763
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>

/*
 * Virtual device vector for mirroring.
 */

typedef struct mirror_child {
        vdev_t          *mc_vd;
        uint64_t        mc_offset;
        int             mc_error;
        uint8_t         mc_tried;
        uint8_t         mc_skipped;
        uint8_t         mc_speculative;
} mirror_child_t;

typedef struct mirror_map {
        int             mm_children;
        int             mm_replacing;
        int             mm_preferred;
        int             mm_root;
        mirror_child_t  mm_child[1];
} mirror_map_t;

int vdev_mirror_shift = 21;
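
/*
 * Note on the layout above: mm_child[] is a variable-length array, so a
 * mirror_map_t is allocated and freed with
 * offsetof(mirror_map_t, mm_child[mm_children]) bytes
 * (see vdev_mirror_map_free() below).
 *
 * vdev_mirror_shift controls read balancing on ordinary mirrors: the
 * preferred child is (io_offset >> vdev_mirror_shift) % children, so the
 * preferred side rotates every 2 MB (1 << 21 bytes) of offset.  For example,
 * on a two-way mirror a read at offset 3 MB starts with child 1 and a read
 * at offset 5 MB starts with child 0; vdev_mirror_child_select() may still
 * fall back to the other child.
 */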

static void
vdev_mirror_map_free(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;

        kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children]));
}

static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
        vdev_mirror_map_free,
        zio_vsd_default_cksum_report
};

static mirror_map_t *
vdev_mirror_map_alloc(zio_t *zio)
{
        mirror_map_t *mm = NULL;
        mirror_child_t *mc;
        vdev_t *vd = zio->io_vd;
        int c, d;

        if (vd == NULL) {
                dva_t *dva = zio->io_bp->blk_dva;
                spa_t *spa = zio->io_spa;

                c = BP_GET_NDVAS(zio->io_bp);

                mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
                mm->mm_children = c;
                mm->mm_replacing = B_FALSE;
                mm->mm_preferred = spa_get_random(c);
                mm->mm_root = B_TRUE;

                /*
                 * Check the other, lower-index DVAs to see if they're on
                 * the same vdev as the child we picked.  If they are, use
                 * them since they are likely to have been allocated from
                 * the primary metaslab in use at the time, and hence are
                 * more likely to have locality with single-copy data.
                 */
                for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
                        if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
                                mm->mm_preferred = d;
                }

                for (c = 0; c < mm->mm_children; c++) {
                        mc = &mm->mm_child[c];

                        mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
                        mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
                }
        } else {
                c = vd->vdev_children;

                mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
                mm->mm_children = c;
                mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
                    vd->vdev_ops == &vdev_spare_ops);
                mm->mm_preferred = mm->mm_replacing ? 0 :
                    (zio->io_offset >> vdev_mirror_shift) % c;
                mm->mm_root = B_FALSE;

                for (c = 0; c < mm->mm_children; c++) {
                        mc = &mm->mm_child[c];
                        mc->mc_vd = vd->vdev_child[c];
                        mc->mc_offset = zio->io_offset;
                }
        }

        zio->io_vsd = mm;
        zio->io_vsd_ops = &vdev_mirror_vsd_ops;
        return (mm);
}
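
/*
 * Open all children and derive the mirror's geometry from them: the usable
 * size is clamped to the smallest child, and the logical/physical ashifts
 * are raised to the largest among the children.  The MIN(*asize - 1, ...) + 1
 * form treats an incoming *asize of 0 as "no limit yet": 0 - 1 wraps to
 * UINT64_MAX, so the first successfully opened child establishes the value.
 */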
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
        int numerrors = 0;
        int lasterror = 0;

        if (vd->vdev_children == 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (SET_ERROR(EINVAL));
        }

        vdev_open_children(vd);

        for (int c = 0; c < vd->vdev_children; c++) {
                vdev_t *cvd = vd->vdev_child[c];

                if (cvd->vdev_open_error) {
                        lasterror = cvd->vdev_open_error;
                        numerrors++;
                        continue;
                }

                *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
                *max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
                *logical_ashift = MAX(*logical_ashift, cvd->vdev_ashift);
                *physical_ashift = MAX(*physical_ashift,
                    cvd->vdev_physical_ashift);
        }

        if (numerrors == vd->vdev_children) {
                vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
                return (lasterror);
        }

        return (0);
}

static void
vdev_mirror_close(vdev_t *vd)
{
        for (int c = 0; c < vd->vdev_children; c++)
                vdev_close(vd->vdev_child[c]);
}

static void
vdev_mirror_child_done(zio_t *zio)
{
        mirror_child_t *mc = zio->io_private;

        mc->mc_error = zio->io_error;
        mc->mc_tried = 1;
        mc->mc_skipped = 0;
}

static void
vdev_mirror_scrub_done(zio_t *zio)
{
        mirror_child_t *mc = zio->io_private;

        if (zio->io_error == 0) {
                zio_t *pio;

                mutex_enter(&zio->io_lock);
                while ((pio = zio_walk_parents(zio)) != NULL) {
                        mutex_enter(&pio->io_lock);
                        ASSERT3U(zio->io_size, >=, pio->io_size);
                        bcopy(zio->io_data, pio->io_data, pio->io_size);
                        mutex_exit(&pio->io_lock);
                }
                mutex_exit(&zio->io_lock);
        }

        zio_buf_free(zio->io_data, zio->io_size);

        mc->mc_error = zio->io_error;
        mc->mc_tried = 1;
        mc->mc_skipped = 0;
}

/*
 * Try to find a child whose DTL doesn't contain the block we want to read.
 * If we can't, try the read on any vdev we haven't already tried.
 */
static int
vdev_mirror_child_select(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        mirror_child_t *mc;
        uint64_t txg = zio->io_txg;
        int i, c;

        ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);

        /*
         * Try to find a child whose DTL doesn't contain the block to read.
         * If a child is known to be completely inaccessible (indicated by
         * vdev_readable() returning B_FALSE), don't even try.
         */
        for (i = 0, c = mm->mm_preferred; i < mm->mm_children; i++, c++) {
                if (c >= mm->mm_children)
                        c = 0;
                mc = &mm->mm_child[c];
                if (mc->mc_tried || mc->mc_skipped)
                        continue;
                if (!vdev_readable(mc->mc_vd)) {
                        mc->mc_error = SET_ERROR(ENXIO);
                        mc->mc_tried = 1;       /* don't even try */
                        mc->mc_skipped = 1;
                        continue;
                }
                if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
                        return (c);
                mc->mc_error = SET_ERROR(ESTALE);
                mc->mc_skipped = 1;
                mc->mc_speculative = 1;
        }

        /*
         * Every device is either missing or has this txg in its DTL.
         * Look for any child we haven't already tried before giving up.
         */
        for (c = 0; c < mm->mm_children; c++)
                if (!mm->mm_child[c].mc_tried)
                        return (c);

        /*
         * Every child failed.  There's no place left to look.
         */
        return (-1);
}

static int
vdev_mirror_io_start(zio_t *zio)
{
        mirror_map_t *mm;
        mirror_child_t *mc;
        int c, children;

        mm = vdev_mirror_map_alloc(zio);

        if (zio->io_type == ZIO_TYPE_READ) {
                if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
                        /*
                         * For scrubbing reads we need to allocate a read
                         * buffer for each child and issue reads to all
                         * children.  If any child succeeds, it will copy its
                         * data into zio->io_data in vdev_mirror_scrub_done.
                         */
                        for (c = 0; c < mm->mm_children; c++) {
                                mc = &mm->mm_child[c];
                                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                                    mc->mc_vd, mc->mc_offset,
                                    zio_buf_alloc(zio->io_size), zio->io_size,
                                    zio->io_type, zio->io_priority, 0,
                                    vdev_mirror_scrub_done, mc));
                        }
                        return (ZIO_PIPELINE_CONTINUE);
                }
                /*
                 * For normal reads just pick one child.
                 */
                c = vdev_mirror_child_select(zio);
                children = (c >= 0);
        } else {
                ASSERT(zio->io_type == ZIO_TYPE_WRITE ||
                    zio->io_type == ZIO_TYPE_FREE);

                /*
                 * Writes and frees go to all children.
                 */
                c = 0;
                children = mm->mm_children;
        }

        while (children--) {
                mc = &mm->mm_child[c];
                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
                    zio->io_type, zio->io_priority, 0,
                    vdev_mirror_child_done, mc));
                c++;
        }

        return (ZIO_PIPELINE_CONTINUE);
}
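
/*
 * Report the most significant error seen across the children.  Errors from
 * children that were skipped speculatively (their DTL indicated the data
 * might be missing; see vdev_mirror_child_select()) are kept in a separate
 * bucket and only reported when no other child recorded an error.
 */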
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
        int error[2] = { 0, 0 };

        for (int c = 0; c < mm->mm_children; c++) {
                mirror_child_t *mc = &mm->mm_child[c];
                int s = mc->mc_speculative;
                error[s] = zio_worst_error(error[s], mc->mc_error);
        }

        return (error[0] ? error[0] : error[1]);
}

static void
vdev_mirror_io_done(zio_t *zio)
{
        mirror_map_t *mm = zio->io_vsd;
        mirror_child_t *mc;
        int c;
        int good_copies = 0;
        int unexpected_errors = 0;

        for (c = 0; c < mm->mm_children; c++) {
                mc = &mm->mm_child[c];

                if (mc->mc_error) {
                        if (!mc->mc_skipped)
                                unexpected_errors++;
                } else if (mc->mc_tried) {
                        good_copies++;
                }
        }

        if (zio->io_type == ZIO_TYPE_WRITE) {
                /*
                 * XXX -- for now, treat partial writes as success.
                 *
                 * Now that we support write reallocation, it would be better
                 * to treat partial failure as real failure unless there are
                 * no non-degraded top-level vdevs left, and not update DTLs
                 * if we intend to reallocate.
                 */
                /* XXPOLICY */
                if (good_copies != mm->mm_children) {
                        /*
                         * Always require at least one good copy.
                         *
                         * For ditto blocks (io_vd == NULL), require
                         * all copies to be good.
                         *
                         * XXX -- for replacing vdevs, there's no great answer.
                         * If the old device is really dead, we may not even
                         * be able to access it -- so we only want to
                         * require good writes to the new device.  But if
                         * the new device turns out to be flaky, we want
                         * to be able to detach it -- which requires all
                         * writes to the old device to have succeeded.
                         */
                        if (good_copies == 0 || zio->io_vd == NULL)
                                zio->io_error = vdev_mirror_worst_error(mm);
                }
                return;
        } else if (zio->io_type == ZIO_TYPE_FREE) {
                return;
        }

        ASSERT(zio->io_type == ZIO_TYPE_READ);

        /*
         * If we don't have a good copy yet, keep trying other children.
         */
        /* XXPOLICY */
        if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
                ASSERT(c >= 0 && c < mm->mm_children);
                mc = &mm->mm_child[c];
                zio_vdev_io_redone(zio);
                zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                    mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
                    ZIO_TYPE_READ, zio->io_priority, 0,
                    vdev_mirror_child_done, mc));
                return;
        }

        /* XXPOLICY */
        if (good_copies == 0) {
                zio->io_error = vdev_mirror_worst_error(mm);
                ASSERT(zio->io_error != 0);
        }

        if (good_copies && spa_writeable(zio->io_spa) &&
            (unexpected_errors ||
            (zio->io_flags & ZIO_FLAG_RESILVER) ||
            ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
                /*
                 * Use the good data we have in hand to repair damaged children.
                 */
                for (c = 0; c < mm->mm_children; c++) {
                        /*
                         * Don't rewrite known good children.
                         * Not only is it unnecessary, it could
                         * actually be harmful: if the system lost
                         * power while rewriting the only good copy,
                         * there would be no good copies left!
                         */
                        mc = &mm->mm_child[c];

                        if (mc->mc_error == 0) {
                                if (mc->mc_tried)
                                        continue;
                                if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
                                    !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
                                    zio->io_txg, 1))
                                        continue;
                                mc->mc_error = SET_ERROR(ESTALE);
                        }

                        zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
                            mc->mc_vd, mc->mc_offset,
                            zio->io_data, zio->io_size,
                            ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
                            ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
                            ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
                }
        }
}

static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
        if (faulted == vd->vdev_children)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
                    VDEV_AUX_NO_REPLICAS);
        else if (degraded + faulted != 0)
                vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
        else
                vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}

vdev_ops_t vdev_mirror_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        VDEV_TYPE_MIRROR,       /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};

vdev_ops_t vdev_replacing_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        VDEV_TYPE_REPLACING,    /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};

vdev_ops_t vdev_spare_ops = {
        vdev_mirror_open,
        vdev_mirror_close,
        vdev_default_asize,
        vdev_mirror_io_start,
        vdev_mirror_io_done,
        vdev_mirror_state_change,
        NULL,
        NULL,
        VDEV_TYPE_SPARE,        /* name of this vdev type */
        B_FALSE                 /* not a leaf vdev */
};