dmu_send.c revision 263397
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	struct uio auio;
	struct iovec aiov;
	ASSERT0(len % 8);

	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}
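
/*
 * Queue a DRR_FREE record for the given range.  Adjacent frees of the
 * same object are aggregated into the pending record; a length of
 * -1ULL ("free to end of object") is pushed out immediately.
 */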
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object+offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If we are doing a non-incremental send, then there can't
	 * be any data in the dataset we're receiving into.  Therefore
	 * a free record would simply be a no-op.  Save space by not
	 * sending it to begin with.
	 */
	if (!dsp->dsa_incremental)
		return (0);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records, and DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
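
/*
 * Emit a DRR_WRITE record for a single level-0 data block: flush any
 * pending aggregated record, then send the record header followed by
 * the block payload.
 */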
static int
dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a DATA record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
	if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
		drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
	DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
	DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
	DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
	drrw->drr_key.ddk_cksum = bp->blk_cksum;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz))
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/* See comment in dump_free(). */
	if (!dsp->dsa_incremental)
		return (0);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records, and DRR_FREEOBJECTS
	 * records can only be aggregated with other DRR_FREEOBJECTS
	 * records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}
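
/*
 * Emit a DRR_OBJECT record describing an allocated dnode and its bonus
 * buffer, or a DRR_FREEOBJECTS record if the dnode is free, followed by
 * a free record for everything past the object's last block.
 */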
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));

	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
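
/*
 * Callback invoked by traverse_dataset() for each block pointer visited:
 * holes become FREE/FREEOBJECTS records, dnode blocks are decomposed
 * into per-dnode OBJECT records, spill blocks become SPILL records, and
 * ordinary level-0 blocks become WRITE records.
 */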
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (SET_ERROR(EINTR));

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int i;
		int blksz = BP_GET_LSIZE(bp);
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		blk = abuf->b_data;
		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj = (zb->zb_blkid <<
			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(dsp, dnobj, blk+i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else { /* it's a level-0 block of a regular object */
		uint32_t aflags = ARC_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10c;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		err = dump_data(dsp, type, zb->zb_object, zb->zb_blkid * blksz,
		    blksz, bp, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Releases dp, ds, and fromds, using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
#ifdef illumos
    dsl_dataset_t *fromds, int outfd, vnode_t *vp, offset_t *off)
#else
    dsl_dataset_t *fromds, int outfd, struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;

	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds)) {
		dsl_dataset_rele(fromds, tag);
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
		return (SET_ERROR(EXDEV));
	}

	err = dmu_objset_from_ds(ds, &os);
	if (err != 0) {
		if (fromds != NULL)
			dsl_dataset_rele(fromds, tag);
		dsl_dataset_rele(ds, tag);
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			if (fromds != NULL)
				dsl_dataset_rele(fromds, tag);
			dsl_dataset_rele(ds, tag);
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			DMU_SET_FEATUREFLAGS(
			    drr->drr_u.drr_begin.drr_versioninfo,
			    DMU_BACKUP_FEATURE_SA_SPILL);
		}
	}
#endif

	drr->drr_u.drr_begin.drr_creation_time =
	    ds->ds_phys->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (fromds != NULL && ds->ds_dir != fromds->ds_dir)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
	if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromds != NULL)
		drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);

	if (fromds != NULL) {
		fromtxg = fromds->ds_phys->ds_creation_txg;
		dsl_dataset_rele(fromds, tag);
		fromds = NULL;
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = ds->ds_phys->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_incremental = (fromtxg != 0);

	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	dsl_dataset_long_hold(ds, FTAG);
	dsl_pool_rele(dp, tag);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    backup_cb, dsp);

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(ds, FTAG);
	dsl_dataset_rele(ds, tag);

	return (err);
}
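
/*
 * Generate a send stream for the snapshot identified by object number
 * tosnap in the named pool, optionally incremental relative to fromsnap
 * (0 requests a full stream), writing it to fp/vp at *off.
 */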
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
	}

	return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
}

int
dmu_send(const char *tosnap, const char *fromsnap,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	if (strchr(tosnap, '@') == NULL)
		return (SET_ERROR(EINVAL));
	if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
	}
	return (dmu_send_impl(FTAG, dp, ds, fromds, outfd, fp, off));
}
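
/*
 * Estimate, in bytes, the size of the stream that dmu_send_impl() would
 * generate for ds (tosnap), optionally incremental from fromds: start
 * from the uncompressed size of the changed data, subtract the
 * estimated indirect-block overhead, and add one dmu_replay_record_t of
 * stream header per block.
 */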
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = ds->ds_phys->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation cancel out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;
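
/*
 * Validate a receive into an existing filesystem: the temporary clone
 * name and the target snapshot name must not already exist, and an
 * incremental stream's fromguid must name a snapshot of this dataset.
 * If the receive is not forced, the dataset must be unmodified since
 * that snapshot.
 */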
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    ds->ds_dir->dd_phys->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    ds->ds_phys->ds_snapnames_zapobj, drba->drba_cookie->drc_tosnap,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = ds->ds_phys->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (snap->ds_phys->ds_guid == fromguid)
				break;
			obj = snap->ds_phys->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, most recent snapshot must be $ORIGIN */
		if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
			return (SET_ERROR(ENODEV));
		drba->drba_snapobj = ds->ds_phys->ds_prev_snap_obj;
	}

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA) {
		return (SET_ERROR(ENOTSUP));
	}

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!dsl_dataset_is_snapshot(origin)) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (origin->ds_phys->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}
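
/*
 * Syncing-context counterpart of dmu_recv_begin_check(): create the
 * dataset we will receive into (a temporary %recv clone of an existing
 * fs, or a brand-new fs/clone), mark it inconsistent, and take
 * ownership of it.
 */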
static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags;

	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
	    DS_FLAG_CI_DATASET : 0;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	newds->ds_phys->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	dmu_replay_record_t *drr;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;

	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		drc->drc_byteswap = B_TRUE;
	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
		return (SET_ERROR(EINVAL));

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin = *drc->drc_drrb;
	if (drc->drc_byteswap) {
		fletcher_4_incremental_byteswap(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		fletcher_4_incremental_native(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	}
	kmem_free(drr, sizeof (dmu_replay_record_t));

	if (drc->drc_byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5));
}

struct restorearg {
	int err;
	boolean_t byteswap;
	kthread_t *td;
	struct file *fp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};
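
/*
 * Maps a snapshot GUID to its dataset so that DRR_WRITE_BYREF records
 * in dedup'ed streams can locate the dataset holding the referenced
 * data.
 */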
typedef struct guid_map_entry {
	uint64_t guid;
	dsl_dataset_t *gme_ds;
	avl_node_t avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}

static int
restore_bytes(struct restorearg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}

static void *
restore_read(struct restorearg *ra, int len)
{
	void *rv;
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, (caddr_t)ra->buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done)
			ra->err = SET_ERROR(EINVAL);
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	rv = ra->buf;
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
	else
		fletcher_4_incremental_native(rv, len, &ra->cksum);
	return (rv);
}
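
/*
 * Byteswap a replay record in place.  Used when the stream was
 * generated on a system of the opposite endianness.
 */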
static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		/* DO64(drr_object.drr_allocation_txg); */
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		DO64(drr_end.drr_toguid);
		break;
	}
#undef DO64
#undef DO32
}
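
/*
 * Apply a DRR_OBJECT record: validate it, allocate or reclaim the
 * object, set its checksum/compression properties, and copy in the
 * bonus buffer (byteswapping it if necessary).
 */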
static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	int err;
	dmu_tx_t *tx;
	void *data = NULL;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > SPA_MAXBLOCKSIZE ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(os, drro->drr_object, NULL);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));

	if (drro->drr_bonuslen) {
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
		if (ra->err != 0)
			return (ra->err);
	}

	if (err == ENOENT) {
		/* currently free, want to be allocated */
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err != 0) {
			dmu_tx_abort(tx);
			return (err);
		}
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
		dmu_tx_commit(tx);
	} else {
		/* currently allocated, want to be allocated */
		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen);
	}
	if (err != 0) {
		return (SET_ERROR(EINVAL));
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, drro->drr_object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
	    tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		err = dmu_free_long_object(os, obj);
		if (err != 0)
			return (err);
	}
	return (0);
}

static int
restore_write(struct restorearg *ra, objset_t *os,
    struct drr_write *drrw)
{
	dmu_tx_t *tx;
	void *data;
	int err;

	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
	    !DMU_OT_IS_VALID(drrw->drr_type))
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, drrw->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	if (ra->byteswap) {
		dmu_object_byteswap_t byteswap =
		    DMU_OT_BYTESWAP(drrw->drr_type);
		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
	}
	dmu_write(os, drrw->drr_object,
	    drrw->drr_offset, drrw->drr_length, data, tx);
	dmu_tx_commit(tx);
	return (0);
}

/*
 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
 * streams to refer to a copy of the data that is already on the
 * system because it came in earlier in the stream.  This function
 * finds the earlier copy of the data, and uses that copy instead of
 * data from the stream to fulfill this write.
 */
static int
restore_write_byref(struct restorearg *ra, objset_t *os,
    struct drr_write_byref *drrwbr)
{
	dmu_tx_t *tx;
	int err;
	guid_map_entry_t gmesrch;
	guid_map_entry_t *gmep;
	avl_index_t where;
	objset_t *ref_os = NULL;
	dmu_buf_t *dbp;

	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
		return (SET_ERROR(EINVAL));

	/*
	 * If the GUID of the referenced dataset is different from the
	 * GUID of the target dataset, find the referenced dataset.
	 */
	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
		gmesrch.guid = drrwbr->drr_refguid;
		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
		    &where)) == NULL) {
			return (SET_ERROR(EINVAL));
		}
		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
			return (SET_ERROR(EINVAL));
	} else {
		ref_os = os;
	}

	if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
		return (err);

	tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, drrwbr->drr_object,
	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
	dmu_buf_rele(dbp, FTAG);
	dmu_tx_commit(tx);
	return (0);
}

static int
restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
{
	dmu_tx_t *tx;
	void *data;
	dmu_buf_t *db, *db_spill;
	int err;

	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
	    drrs->drr_length > SPA_MAXBLOCKSIZE)
		return (SET_ERROR(EINVAL));

	data = restore_read(ra, drrs->drr_length);
	if (data == NULL)
		return (ra->err);

	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
		dmu_buf_rele(db, FTAG);
		return (err);
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_spill(tx, db->db_object);

	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_buf_rele(db, FTAG);
		dmu_buf_rele(db_spill, FTAG);
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_buf_will_dirty(db_spill, tx);

	if (db_spill->db_size < drrs->drr_length)
		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
	bcopy(data, db_spill->db_data, drrs->drr_length);

	dmu_buf_rele(db, FTAG);
	dmu_buf_rele(db_spill, FTAG);

	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_free(struct restorearg *ra, objset_t *os,
    struct drr_free *drrf)
{
	int err;

	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	err = dmu_free_long_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	char name[MAXNAMELEN];
	dsl_dataset_name(drc->drc_ds, name);
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	(void) dsl_destroy_head(name);
}

/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
int
dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;
	int featureflags;

	ra.byteswap = drc->drc_byteswap;
	ra.cksum = drc->drc_cksum;
	ra.td = curthread;
	ra.fp = fp;
	ra.voff = *voffp;
	ra.bufsize = 1<<20;
	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));

	ASSERT(drc->drc_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = SET_ERROR(EBADF);
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err != 0) {
			cleanup_fd = -1;
			goto out;
		}

		if (*action_handlep == 0) {
			ra.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(ra.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			ra.err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, ra.guid_to_ds_map,
			    action_handlep);
			if (ra.err != 0)
				goto out;
		} else {
			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&ra.guid_to_ds_map);
			if (ra.err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
	}

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = SET_ERROR(EINTR);
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = SET_ERROR(ECKSUM);
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = SET_ERROR(EINVAL);
			goto out;
		}
		pcksum = ra.cksum;
	}
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * destroy what we created, so we don't leave it in the
		 * inconsistent restoring state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	kmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}
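
/*
 * Open-context check half of dmu_recv_end(): for an existing fs, verify
 * that the received clone can be swapped with the origin head
 * (destroying any intervening snapshots if the receive was forced); for
 * a new fs, verify that it can simply be snapshotted.
 */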
static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds can not
			 * have any snaps of its own).
			 */
			uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
			while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					return (error);
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE);
	}
	return (error);
}
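
/*
 * Syncing-context half of dmu_recv_end(): destroy obsolete snapshots if
 * forced, swap the received clone with the origin head (or snapshot a
 * newly created fs), stamp the new snapshot's creation time and guid
 * from the stream, clear the inconsistent flag, and drop the hold taken
 * in dmu_recv_begin().
 */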
static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj = origin_head->ds_phys->ds_prev_snap_obj;
			while (obj != drc->drc_ds->ds_phys->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = snap->ds_phys->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		origin_head->ds_prev->ds_phys->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		origin_head->ds_prev->ds_phys->ds_guid =
		    drc->drc_drrb->drr_toguid;
		origin_head->ds_prev->ds_phys->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		origin_head->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		ds->ds_prev->ds_phys->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		ds->ds_prev->ds_phys->ds_guid = drc->drc_drrb->drr_toguid;
		ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
	}
	drc->drc_newsnapobj = drc->drc_ds->ds_phys->ds_prev_snap_obj;
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}

static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
		gmep->guid = snapds->ds_phys->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else
		kmem_free(gmep, sizeof (*gmep));

	dsl_pool_rele(dp, FTAG);
	return (err);
}

static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	int error;
	char name[MAXNAMELEN];

#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks);

	if (error != 0)
		dmu_recv_cleanup_ds(drc);
	return (error);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	int error;

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	drc->drc_owner = owner;

	if (drc->drc_newfs)
		return (dmu_recv_new_end(drc));
	else
		return (dmu_recv_existing_end(drc));
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}