dmu_send.c revision 308083
1/* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21/* 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 24 * Copyright (c) 2011, 2015 by Delphix. All rights reserved. 25 * Copyright (c) 2014, Joyent, Inc. All rights reserved. 26 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved. 27 * Copyright 2014 HybridCluster. All rights reserved. 28 * Copyright 2016 RackTop Systems. 29 * Copyright (c) 2014 Integros [integros.com] 30 */ 31 32#include <sys/dmu.h> 33#include <sys/dmu_impl.h> 34#include <sys/dmu_tx.h> 35#include <sys/dbuf.h> 36#include <sys/dnode.h> 37#include <sys/zfs_context.h> 38#include <sys/dmu_objset.h> 39#include <sys/dmu_traverse.h> 40#include <sys/dsl_dataset.h> 41#include <sys/dsl_dir.h> 42#include <sys/dsl_prop.h> 43#include <sys/dsl_pool.h> 44#include <sys/dsl_synctask.h> 45#include <sys/zfs_ioctl.h> 46#include <sys/zap.h> 47#include <sys/zio_checksum.h> 48#include <sys/zfs_znode.h> 49#include <zfs_fletcher.h> 50#include <sys/avl.h> 51#include <sys/ddt.h> 52#include <sys/zfs_onexit.h> 53#include <sys/dmu_send.h> 54#include <sys/dsl_destroy.h> 55#include <sys/blkptr.h> 56#include <sys/dsl_bookmark.h> 57#include <sys/zfeature.h> 58#include <sys/bqueue.h> 59 60#ifdef __FreeBSD__ 61#undef dump_write 62#define dump_write dmu_dump_write 63#endif 64 65/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */ 66int zfs_send_corrupt_data = B_FALSE; 67int zfs_send_queue_length = 16 * 1024 * 1024; 68int zfs_recv_queue_length = 16 * 1024 * 1024; 69/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */ 70int zfs_send_set_freerecords_bit = B_TRUE; 71 72#ifdef _KERNEL 73TUNABLE_INT("vfs.zfs.send_set_freerecords_bit", &zfs_send_set_freerecords_bit); 74#endif 75 76static char *dmu_recv_tag = "dmu_recv_tag"; 77const char *recv_clone_name = "%recv"; 78 79#define BP_SPAN(datablkszsec, indblkshift, level) \ 80 (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \ 81 (level) * (indblkshift - SPA_BLKPTRSHIFT))) 82 83static void byteswap_record(dmu_replay_record_t *drr); 84 85struct send_thread_arg { 86 bqueue_t q; 87 dsl_dataset_t *ds; /* Dataset to traverse */ 88 uint64_t fromtxg; /* Traverse from this txg */ 89 int flags; /* flags to pass to traverse_dataset */ 90 int error_code; 91 boolean_t cancel; 92 zbookmark_phys_t resume; 93}; 94 95struct send_block_record { 96 boolean_t eos_marker; /* Marks the end of the stream */ 97 blkptr_t bp; 98 zbookmark_phys_t zb; 99 uint8_t indblkshift; 100 uint16_t datablkszsec; 101 bqueue_node_t ln; 102}; 103 104static int 105dump_bytes(dmu_sendarg_t *dsp, void *buf, int len) 106{ 107 dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os); 108 struct uio auio; 109 struct iovec aiov; 110 111 /* 112 * The code does not rely on this (len being a multiple of 8). We keep 113 * this assertion because of the corresponding assertion in 114 * receive_read(). Keeping this assertion ensures that we do not 115 * inadvertently break backwards compatibility (causing the assertion 116 * in receive_read() to trigger on old software). 117 * 118 * Removing the assertions could be rolled into a new feature that uses 119 * data that isn't 8-byte aligned; if the assertions were removed, a 120 * feature flag would have to be added. 121 */ 122 123 ASSERT0(len % 8); 124 125 aiov.iov_base = buf; 126 aiov.iov_len = len; 127 auio.uio_iov = &aiov; 128 auio.uio_iovcnt = 1; 129 auio.uio_resid = len; 130 auio.uio_segflg = UIO_SYSSPACE; 131 auio.uio_rw = UIO_WRITE; 132 auio.uio_offset = (off_t)-1; 133 auio.uio_td = dsp->dsa_td; 134#ifdef _KERNEL 135 if (dsp->dsa_fp->f_type == DTYPE_VNODE) 136 bwillwrite(); 137 dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0, 138 dsp->dsa_td); 139#else 140 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__); 141 dsp->dsa_err = EOPNOTSUPP; 142#endif 143 mutex_enter(&ds->ds_sendstream_lock); 144 *dsp->dsa_off += len; 145 mutex_exit(&ds->ds_sendstream_lock); 146 147 return (dsp->dsa_err); 148} 149 150/* 151 * For all record types except BEGIN, fill in the checksum (overlaid in 152 * drr_u.drr_checksum.drr_checksum). The checksum verifies everything 153 * up to the start of the checksum itself. 154 */ 155static int 156dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len) 157{ 158 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 159 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); 160 fletcher_4_incremental_native(dsp->dsa_drr, 161 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 162 &dsp->dsa_zc); 163 if (dsp->dsa_drr->drr_type == DRR_BEGIN) { 164 dsp->dsa_sent_begin = B_TRUE; 165 } else { 166 ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u. 167 drr_checksum.drr_checksum)); 168 dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc; 169 } 170 if (dsp->dsa_drr->drr_type == DRR_END) { 171 dsp->dsa_sent_end = B_TRUE; 172 } 173 fletcher_4_incremental_native(&dsp->dsa_drr-> 174 drr_u.drr_checksum.drr_checksum, 175 sizeof (zio_cksum_t), &dsp->dsa_zc); 176 if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0) 177 return (SET_ERROR(EINTR)); 178 if (payload_len != 0) { 179 fletcher_4_incremental_native(payload, payload_len, 180 &dsp->dsa_zc); 181 if (dump_bytes(dsp, payload, payload_len) != 0) 182 return (SET_ERROR(EINTR)); 183 } 184 return (0); 185} 186 187/* 188 * Fill in the drr_free struct, or perform aggregation if the previous record is 189 * also a free record, and the two are adjacent. 190 * 191 * Note that we send free records even for a full send, because we want to be 192 * able to receive a full send as a clone, which requires a list of all the free 193 * and freeobject records that were generated on the source. 194 */ 195static int 196dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset, 197 uint64_t length) 198{ 199 struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free); 200 201 /* 202 * When we receive a free record, dbuf_free_range() assumes 203 * that the receiving system doesn't have any dbufs in the range 204 * being freed. This is always true because there is a one-record 205 * constraint: we only send one WRITE record for any given 206 * object,offset. We know that the one-record constraint is 207 * true because we always send data in increasing order by 208 * object,offset. 209 * 210 * If the increasing-order constraint ever changes, we should find 211 * another way to assert that the one-record constraint is still 212 * satisfied. 213 */ 214 ASSERT(object > dsp->dsa_last_data_object || 215 (object == dsp->dsa_last_data_object && 216 offset > dsp->dsa_last_data_offset)); 217 218 if (length != -1ULL && offset + length < offset) 219 length = -1ULL; 220 221 /* 222 * If there is a pending op, but it's not PENDING_FREE, push it out, 223 * since free block aggregation can only be done for blocks of the 224 * same type (i.e., DRR_FREE records can only be aggregated with 225 * other DRR_FREE records. DRR_FREEOBJECTS records can only be 226 * aggregated with other DRR_FREEOBJECTS records. 227 */ 228 if (dsp->dsa_pending_op != PENDING_NONE && 229 dsp->dsa_pending_op != PENDING_FREE) { 230 if (dump_record(dsp, NULL, 0) != 0) 231 return (SET_ERROR(EINTR)); 232 dsp->dsa_pending_op = PENDING_NONE; 233 } 234 235 if (dsp->dsa_pending_op == PENDING_FREE) { 236 /* 237 * There should never be a PENDING_FREE if length is -1 238 * (because dump_dnode is the only place where this 239 * function is called with a -1, and only after flushing 240 * any pending record). 241 */ 242 ASSERT(length != -1ULL); 243 /* 244 * Check to see whether this free block can be aggregated 245 * with pending one. 246 */ 247 if (drrf->drr_object == object && drrf->drr_offset + 248 drrf->drr_length == offset) { 249 drrf->drr_length += length; 250 return (0); 251 } else { 252 /* not a continuation. Push out pending record */ 253 if (dump_record(dsp, NULL, 0) != 0) 254 return (SET_ERROR(EINTR)); 255 dsp->dsa_pending_op = PENDING_NONE; 256 } 257 } 258 /* create a FREE record and make it pending */ 259 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); 260 dsp->dsa_drr->drr_type = DRR_FREE; 261 drrf->drr_object = object; 262 drrf->drr_offset = offset; 263 drrf->drr_length = length; 264 drrf->drr_toguid = dsp->dsa_toguid; 265 if (length == -1ULL) { 266 if (dump_record(dsp, NULL, 0) != 0) 267 return (SET_ERROR(EINTR)); 268 } else { 269 dsp->dsa_pending_op = PENDING_FREE; 270 } 271 272 return (0); 273} 274 275static int 276dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, 277 uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data) 278{ 279 struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write); 280 281 /* 282 * We send data in increasing object, offset order. 283 * See comment in dump_free() for details. 284 */ 285 ASSERT(object > dsp->dsa_last_data_object || 286 (object == dsp->dsa_last_data_object && 287 offset > dsp->dsa_last_data_offset)); 288 dsp->dsa_last_data_object = object; 289 dsp->dsa_last_data_offset = offset + blksz - 1; 290 291 /* 292 * If there is any kind of pending aggregation (currently either 293 * a grouping of free objects or free blocks), push it out to 294 * the stream, since aggregation can't be done across operations 295 * of different types. 296 */ 297 if (dsp->dsa_pending_op != PENDING_NONE) { 298 if (dump_record(dsp, NULL, 0) != 0) 299 return (SET_ERROR(EINTR)); 300 dsp->dsa_pending_op = PENDING_NONE; 301 } 302 /* write a WRITE record */ 303 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); 304 dsp->dsa_drr->drr_type = DRR_WRITE; 305 drrw->drr_object = object; 306 drrw->drr_type = type; 307 drrw->drr_offset = offset; 308 drrw->drr_length = blksz; 309 drrw->drr_toguid = dsp->dsa_toguid; 310 if (bp == NULL || BP_IS_EMBEDDED(bp)) { 311 /* 312 * There's no pre-computed checksum for partial-block 313 * writes or embedded BP's, so (like 314 * fletcher4-checkummed blocks) userland will have to 315 * compute a dedup-capable checksum itself. 316 */ 317 drrw->drr_checksumtype = ZIO_CHECKSUM_OFF; 318 } else { 319 drrw->drr_checksumtype = BP_GET_CHECKSUM(bp); 320 if (zio_checksum_table[drrw->drr_checksumtype].ci_flags & 321 ZCHECKSUM_FLAG_DEDUP) 322 drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP; 323 DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp)); 324 DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp)); 325 DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp)); 326 drrw->drr_key.ddk_cksum = bp->blk_cksum; 327 } 328 329 if (dump_record(dsp, data, blksz) != 0) 330 return (SET_ERROR(EINTR)); 331 return (0); 332} 333 334static int 335dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset, 336 int blksz, const blkptr_t *bp) 337{ 338 char buf[BPE_PAYLOAD_SIZE]; 339 struct drr_write_embedded *drrw = 340 &(dsp->dsa_drr->drr_u.drr_write_embedded); 341 342 if (dsp->dsa_pending_op != PENDING_NONE) { 343 if (dump_record(dsp, NULL, 0) != 0) 344 return (EINTR); 345 dsp->dsa_pending_op = PENDING_NONE; 346 } 347 348 ASSERT(BP_IS_EMBEDDED(bp)); 349 350 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); 351 dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED; 352 drrw->drr_object = object; 353 drrw->drr_offset = offset; 354 drrw->drr_length = blksz; 355 drrw->drr_toguid = dsp->dsa_toguid; 356 drrw->drr_compression = BP_GET_COMPRESS(bp); 357 drrw->drr_etype = BPE_GET_ETYPE(bp); 358 drrw->drr_lsize = BPE_GET_LSIZE(bp); 359 drrw->drr_psize = BPE_GET_PSIZE(bp); 360 361 decode_embedded_bp_compressed(bp, buf); 362 363 if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0) 364 return (EINTR); 365 return (0); 366} 367 368static int 369dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data) 370{ 371 struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill); 372 373 if (dsp->dsa_pending_op != PENDING_NONE) { 374 if (dump_record(dsp, NULL, 0) != 0) 375 return (SET_ERROR(EINTR)); 376 dsp->dsa_pending_op = PENDING_NONE; 377 } 378 379 /* write a SPILL record */ 380 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); 381 dsp->dsa_drr->drr_type = DRR_SPILL; 382 drrs->drr_object = object; 383 drrs->drr_length = blksz; 384 drrs->drr_toguid = dsp->dsa_toguid; 385 386 if (dump_record(dsp, data, blksz) != 0) 387 return (SET_ERROR(EINTR)); 388 return (0); 389} 390 391static int 392dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs) 393{ 394 struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects); 395 396 /* 397 * If there is a pending op, but it's not PENDING_FREEOBJECTS, 398 * push it out, since free block aggregation can only be done for 399 * blocks of the same type (i.e., DRR_FREE records can only be 400 * aggregated with other DRR_FREE records. DRR_FREEOBJECTS records 401 * can only be aggregated with other DRR_FREEOBJECTS records. 402 */ 403 if (dsp->dsa_pending_op != PENDING_NONE && 404 dsp->dsa_pending_op != PENDING_FREEOBJECTS) { 405 if (dump_record(dsp, NULL, 0) != 0) 406 return (SET_ERROR(EINTR)); 407 dsp->dsa_pending_op = PENDING_NONE; 408 } 409 if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) { 410 /* 411 * See whether this free object array can be aggregated 412 * with pending one 413 */ 414 if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) { 415 drrfo->drr_numobjs += numobjs; 416 return (0); 417 } else { 418 /* can't be aggregated. Push out pending record */ 419 if (dump_record(dsp, NULL, 0) != 0) 420 return (SET_ERROR(EINTR)); 421 dsp->dsa_pending_op = PENDING_NONE; 422 } 423 } 424 425 /* write a FREEOBJECTS record */ 426 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); 427 dsp->dsa_drr->drr_type = DRR_FREEOBJECTS; 428 drrfo->drr_firstobj = firstobj; 429 drrfo->drr_numobjs = numobjs; 430 drrfo->drr_toguid = dsp->dsa_toguid; 431 432 dsp->dsa_pending_op = PENDING_FREEOBJECTS; 433 434 return (0); 435} 436 437static int 438dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp) 439{ 440 struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object); 441 442 if (object < dsp->dsa_resume_object) { 443 /* 444 * Note: when resuming, we will visit all the dnodes in 445 * the block of dnodes that we are resuming from. In 446 * this case it's unnecessary to send the dnodes prior to 447 * the one we are resuming from. We should be at most one 448 * block's worth of dnodes behind the resume point. 449 */ 450 ASSERT3U(dsp->dsa_resume_object - object, <, 451 1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT)); 452 return (0); 453 } 454 455 if (dnp == NULL || dnp->dn_type == DMU_OT_NONE) 456 return (dump_freeobjects(dsp, object, 1)); 457 458 if (dsp->dsa_pending_op != PENDING_NONE) { 459 if (dump_record(dsp, NULL, 0) != 0) 460 return (SET_ERROR(EINTR)); 461 dsp->dsa_pending_op = PENDING_NONE; 462 } 463 464 /* write an OBJECT record */ 465 bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t)); 466 dsp->dsa_drr->drr_type = DRR_OBJECT; 467 drro->drr_object = object; 468 drro->drr_type = dnp->dn_type; 469 drro->drr_bonustype = dnp->dn_bonustype; 470 drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; 471 drro->drr_bonuslen = dnp->dn_bonuslen; 472 drro->drr_checksumtype = dnp->dn_checksum; 473 drro->drr_compress = dnp->dn_compress; 474 drro->drr_toguid = dsp->dsa_toguid; 475 476 if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) && 477 drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE) 478 drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE; 479 480 if (dump_record(dsp, DN_BONUS(dnp), 481 P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) { 482 return (SET_ERROR(EINTR)); 483 } 484 485 /* Free anything past the end of the file. */ 486 if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) * 487 (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0) 488 return (SET_ERROR(EINTR)); 489 if (dsp->dsa_err != 0) 490 return (SET_ERROR(EINTR)); 491 return (0); 492} 493 494static boolean_t 495backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp) 496{ 497 if (!BP_IS_EMBEDDED(bp)) 498 return (B_FALSE); 499 500 /* 501 * Compression function must be legacy, or explicitly enabled. 502 */ 503 if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS && 504 !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4))) 505 return (B_FALSE); 506 507 /* 508 * Embed type must be explicitly enabled. 509 */ 510 switch (BPE_GET_ETYPE(bp)) { 511 case BP_EMBEDDED_TYPE_DATA: 512 if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) 513 return (B_TRUE); 514 break; 515 default: 516 return (B_FALSE); 517 } 518 return (B_FALSE); 519} 520 521/* 522 * This is the callback function to traverse_dataset that acts as the worker 523 * thread for dmu_send_impl. 524 */ 525/*ARGSUSED*/ 526static int 527send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 528 const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg) 529{ 530 struct send_thread_arg *sta = arg; 531 struct send_block_record *record; 532 uint64_t record_size; 533 int err = 0; 534 535 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT || 536 zb->zb_object >= sta->resume.zb_object); 537 538 if (sta->cancel) 539 return (SET_ERROR(EINTR)); 540 541 if (bp == NULL) { 542 ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL); 543 return (0); 544 } else if (zb->zb_level < 0) { 545 return (0); 546 } 547 548 record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP); 549 record->eos_marker = B_FALSE; 550 record->bp = *bp; 551 record->zb = *zb; 552 record->indblkshift = dnp->dn_indblkshift; 553 record->datablkszsec = dnp->dn_datablkszsec; 554 record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT; 555 bqueue_enqueue(&sta->q, record, record_size); 556 557 return (err); 558} 559 560/* 561 * This function kicks off the traverse_dataset. It also handles setting the 562 * error code of the thread in case something goes wrong, and pushes the End of 563 * Stream record when the traverse_dataset call has finished. If there is no 564 * dataset to traverse, the thread immediately pushes End of Stream marker. 565 */ 566static void 567send_traverse_thread(void *arg) 568{ 569 struct send_thread_arg *st_arg = arg; 570 int err; 571 struct send_block_record *data; 572 573 if (st_arg->ds != NULL) { 574 err = traverse_dataset_resume(st_arg->ds, 575 st_arg->fromtxg, &st_arg->resume, 576 st_arg->flags, send_cb, st_arg); 577 578 if (err != EINTR) 579 st_arg->error_code = err; 580 } 581 data = kmem_zalloc(sizeof (*data), KM_SLEEP); 582 data->eos_marker = B_TRUE; 583 bqueue_enqueue(&st_arg->q, data, 1); 584 thread_exit(); 585} 586 587/* 588 * This function actually handles figuring out what kind of record needs to be 589 * dumped, reading the data (which has hopefully been prefetched), and calling 590 * the appropriate helper function. 591 */ 592static int 593do_dump(dmu_sendarg_t *dsa, struct send_block_record *data) 594{ 595 dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os); 596 const blkptr_t *bp = &data->bp; 597 const zbookmark_phys_t *zb = &data->zb; 598 uint8_t indblkshift = data->indblkshift; 599 uint16_t dblkszsec = data->datablkszsec; 600 spa_t *spa = ds->ds_dir->dd_pool->dp_spa; 601 dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE; 602 int err = 0; 603 604 ASSERT3U(zb->zb_level, >=, 0); 605 606 ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT || 607 zb->zb_object >= dsa->dsa_resume_object); 608 609 if (zb->zb_object != DMU_META_DNODE_OBJECT && 610 DMU_OBJECT_IS_SPECIAL(zb->zb_object)) { 611 return (0); 612 } else if (BP_IS_HOLE(bp) && 613 zb->zb_object == DMU_META_DNODE_OBJECT) { 614 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level); 615 uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT; 616 err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT); 617 } else if (BP_IS_HOLE(bp)) { 618 uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level); 619 uint64_t offset = zb->zb_blkid * span; 620 err = dump_free(dsa, zb->zb_object, offset, span); 621 } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) { 622 return (0); 623 } else if (type == DMU_OT_DNODE) { 624 int blksz = BP_GET_LSIZE(bp); 625 arc_flags_t aflags = ARC_FLAG_WAIT; 626 arc_buf_t *abuf; 627 628 ASSERT0(zb->zb_level); 629 630 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf, 631 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, 632 &aflags, zb) != 0) 633 return (SET_ERROR(EIO)); 634 635 dnode_phys_t *blk = abuf->b_data; 636 uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT); 637 for (int i = 0; i < blksz >> DNODE_SHIFT; i++) { 638 err = dump_dnode(dsa, dnobj + i, blk + i); 639 if (err != 0) 640 break; 641 } 642 arc_buf_destroy(abuf, &abuf); 643 } else if (type == DMU_OT_SA) { 644 arc_flags_t aflags = ARC_FLAG_WAIT; 645 arc_buf_t *abuf; 646 int blksz = BP_GET_LSIZE(bp); 647 648 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf, 649 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, 650 &aflags, zb) != 0) 651 return (SET_ERROR(EIO)); 652 653 err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data); 654 arc_buf_destroy(abuf, &abuf); 655 } else if (backup_do_embed(dsa, bp)) { 656 /* it's an embedded level-0 block of a regular object */ 657 int blksz = dblkszsec << SPA_MINBLOCKSHIFT; 658 ASSERT0(zb->zb_level); 659 err = dump_write_embedded(dsa, zb->zb_object, 660 zb->zb_blkid * blksz, blksz, bp); 661 } else { 662 /* it's a level-0 block of a regular object */ 663 arc_flags_t aflags = ARC_FLAG_WAIT; 664 arc_buf_t *abuf; 665 int blksz = dblkszsec << SPA_MINBLOCKSHIFT; 666 uint64_t offset; 667 668 ASSERT0(zb->zb_level); 669 ASSERT(zb->zb_object > dsa->dsa_resume_object || 670 (zb->zb_object == dsa->dsa_resume_object && 671 zb->zb_blkid * blksz >= dsa->dsa_resume_offset)); 672 673 if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf, 674 ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, 675 &aflags, zb) != 0) { 676 if (zfs_send_corrupt_data) { 677 /* Send a block filled with 0x"zfs badd bloc" */ 678 abuf = arc_alloc_buf(spa, blksz, &abuf, 679 ARC_BUFC_DATA); 680 uint64_t *ptr; 681 for (ptr = abuf->b_data; 682 (char *)ptr < (char *)abuf->b_data + blksz; 683 ptr++) 684 *ptr = 0x2f5baddb10cULL; 685 } else { 686 return (SET_ERROR(EIO)); 687 } 688 } 689 690 offset = zb->zb_blkid * blksz; 691 692 if (!(dsa->dsa_featureflags & 693 DMU_BACKUP_FEATURE_LARGE_BLOCKS) && 694 blksz > SPA_OLD_MAXBLOCKSIZE) { 695 char *buf = abuf->b_data; 696 while (blksz > 0 && err == 0) { 697 int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE); 698 err = dump_write(dsa, type, zb->zb_object, 699 offset, n, NULL, buf); 700 offset += n; 701 buf += n; 702 blksz -= n; 703 } 704 } else { 705 err = dump_write(dsa, type, zb->zb_object, 706 offset, blksz, bp, abuf->b_data); 707 } 708 arc_buf_destroy(abuf, &abuf); 709 } 710 711 ASSERT(err == 0 || err == EINTR); 712 return (err); 713} 714 715/* 716 * Pop the new data off the queue, and free the old data. 717 */ 718static struct send_block_record * 719get_next_record(bqueue_t *bq, struct send_block_record *data) 720{ 721 struct send_block_record *tmp = bqueue_dequeue(bq); 722 kmem_free(data, sizeof (*data)); 723 return (tmp); 724} 725 726/* 727 * Actually do the bulk of the work in a zfs send. 728 * 729 * Note: Releases dp using the specified tag. 730 */ 731static int 732dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds, 733 zfs_bookmark_phys_t *ancestor_zb, 734 boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd, 735 uint64_t resumeobj, uint64_t resumeoff, 736#ifdef illumos 737 vnode_t *vp, offset_t *off) 738#else 739 struct file *fp, offset_t *off) 740#endif 741{ 742 objset_t *os; 743 dmu_replay_record_t *drr; 744 dmu_sendarg_t *dsp; 745 int err; 746 uint64_t fromtxg = 0; 747 uint64_t featureflags = 0; 748 struct send_thread_arg to_arg = { 0 }; 749 750 err = dmu_objset_from_ds(to_ds, &os); 751 if (err != 0) { 752 dsl_pool_rele(dp, tag); 753 return (err); 754 } 755 756 drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP); 757 drr->drr_type = DRR_BEGIN; 758 drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC; 759 DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo, 760 DMU_SUBSTREAM); 761 762#ifdef _KERNEL 763 if (dmu_objset_type(os) == DMU_OST_ZFS) { 764 uint64_t version; 765 if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) { 766 kmem_free(drr, sizeof (dmu_replay_record_t)); 767 dsl_pool_rele(dp, tag); 768 return (SET_ERROR(EINVAL)); 769 } 770 if (version >= ZPL_VERSION_SA) { 771 featureflags |= DMU_BACKUP_FEATURE_SA_SPILL; 772 } 773 } 774#endif 775 776 if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS]) 777 featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS; 778 if (embedok && 779 spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) { 780 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA; 781 if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) 782 featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4; 783 } 784 785 if (resumeobj != 0 || resumeoff != 0) { 786 featureflags |= DMU_BACKUP_FEATURE_RESUMING; 787 } 788 789 DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo, 790 featureflags); 791 792 drr->drr_u.drr_begin.drr_creation_time = 793 dsl_dataset_phys(to_ds)->ds_creation_time; 794 drr->drr_u.drr_begin.drr_type = dmu_objset_type(os); 795 if (is_clone) 796 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE; 797 drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid; 798 if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET) 799 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA; 800 if (zfs_send_set_freerecords_bit) 801 drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS; 802 803 if (ancestor_zb != NULL) { 804 drr->drr_u.drr_begin.drr_fromguid = 805 ancestor_zb->zbm_guid; 806 fromtxg = ancestor_zb->zbm_creation_txg; 807 } 808 dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname); 809 if (!to_ds->ds_is_snapshot) { 810 (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--", 811 sizeof (drr->drr_u.drr_begin.drr_toname)); 812 } 813 814 dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP); 815 816 dsp->dsa_drr = drr; 817 dsp->dsa_outfd = outfd; 818 dsp->dsa_proc = curproc; 819 dsp->dsa_td = curthread; 820 dsp->dsa_fp = fp; 821 dsp->dsa_os = os; 822 dsp->dsa_off = off; 823 dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid; 824 dsp->dsa_pending_op = PENDING_NONE; 825 dsp->dsa_featureflags = featureflags; 826 dsp->dsa_resume_object = resumeobj; 827 dsp->dsa_resume_offset = resumeoff; 828 829 mutex_enter(&to_ds->ds_sendstream_lock); 830 list_insert_head(&to_ds->ds_sendstreams, dsp); 831 mutex_exit(&to_ds->ds_sendstream_lock); 832 833 dsl_dataset_long_hold(to_ds, FTAG); 834 dsl_pool_rele(dp, tag); 835 836 void *payload = NULL; 837 size_t payload_len = 0; 838 if (resumeobj != 0 || resumeoff != 0) { 839 dmu_object_info_t to_doi; 840 err = dmu_object_info(os, resumeobj, &to_doi); 841 if (err != 0) 842 goto out; 843 SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0, 844 resumeoff / to_doi.doi_data_block_size); 845 846 nvlist_t *nvl = fnvlist_alloc(); 847 fnvlist_add_uint64(nvl, "resume_object", resumeobj); 848 fnvlist_add_uint64(nvl, "resume_offset", resumeoff); 849 payload = fnvlist_pack(nvl, &payload_len); 850 drr->drr_payloadlen = payload_len; 851 fnvlist_free(nvl); 852 } 853 854 err = dump_record(dsp, payload, payload_len); 855 fnvlist_pack_free(payload, payload_len); 856 if (err != 0) { 857 err = dsp->dsa_err; 858 goto out; 859 } 860 861 err = bqueue_init(&to_arg.q, zfs_send_queue_length, 862 offsetof(struct send_block_record, ln)); 863 to_arg.error_code = 0; 864 to_arg.cancel = B_FALSE; 865 to_arg.ds = to_ds; 866 to_arg.fromtxg = fromtxg; 867 to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH; 868 (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0, 869 TS_RUN, minclsyspri); 870 871 struct send_block_record *to_data; 872 to_data = bqueue_dequeue(&to_arg.q); 873 874 while (!to_data->eos_marker && err == 0) { 875 err = do_dump(dsp, to_data); 876 to_data = get_next_record(&to_arg.q, to_data); 877 if (issig(JUSTLOOKING) && issig(FORREAL)) 878 err = EINTR; 879 } 880 881 if (err != 0) { 882 to_arg.cancel = B_TRUE; 883 while (!to_data->eos_marker) { 884 to_data = get_next_record(&to_arg.q, to_data); 885 } 886 } 887 kmem_free(to_data, sizeof (*to_data)); 888 889 bqueue_destroy(&to_arg.q); 890 891 if (err == 0 && to_arg.error_code != 0) 892 err = to_arg.error_code; 893 894 if (err != 0) 895 goto out; 896 897 if (dsp->dsa_pending_op != PENDING_NONE) 898 if (dump_record(dsp, NULL, 0) != 0) 899 err = SET_ERROR(EINTR); 900 901 if (err != 0) { 902 if (err == EINTR && dsp->dsa_err != 0) 903 err = dsp->dsa_err; 904 goto out; 905 } 906 907 bzero(drr, sizeof (dmu_replay_record_t)); 908 drr->drr_type = DRR_END; 909 drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc; 910 drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid; 911 912 if (dump_record(dsp, NULL, 0) != 0) 913 err = dsp->dsa_err; 914 915out: 916 mutex_enter(&to_ds->ds_sendstream_lock); 917 list_remove(&to_ds->ds_sendstreams, dsp); 918 mutex_exit(&to_ds->ds_sendstream_lock); 919 920 VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end)); 921 922 kmem_free(drr, sizeof (dmu_replay_record_t)); 923 kmem_free(dsp, sizeof (dmu_sendarg_t)); 924 925 dsl_dataset_long_rele(to_ds, FTAG); 926 927 return (err); 928} 929 930int 931dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap, 932 boolean_t embedok, boolean_t large_block_ok, 933#ifdef illumos 934 int outfd, vnode_t *vp, offset_t *off) 935#else 936 int outfd, struct file *fp, offset_t *off) 937#endif 938{ 939 dsl_pool_t *dp; 940 dsl_dataset_t *ds; 941 dsl_dataset_t *fromds = NULL; 942 int err; 943 944 err = dsl_pool_hold(pool, FTAG, &dp); 945 if (err != 0) 946 return (err); 947 948 err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds); 949 if (err != 0) { 950 dsl_pool_rele(dp, FTAG); 951 return (err); 952 } 953 954 if (fromsnap != 0) { 955 zfs_bookmark_phys_t zb; 956 boolean_t is_clone; 957 958 err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds); 959 if (err != 0) { 960 dsl_dataset_rele(ds, FTAG); 961 dsl_pool_rele(dp, FTAG); 962 return (err); 963 } 964 if (!dsl_dataset_is_before(ds, fromds, 0)) 965 err = SET_ERROR(EXDEV); 966 zb.zbm_creation_time = 967 dsl_dataset_phys(fromds)->ds_creation_time; 968 zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg; 969 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid; 970 is_clone = (fromds->ds_dir != ds->ds_dir); 971 dsl_dataset_rele(fromds, FTAG); 972 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone, 973 embedok, large_block_ok, outfd, 0, 0, fp, off); 974 } else { 975 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE, 976 embedok, large_block_ok, outfd, 0, 0, fp, off); 977 } 978 dsl_dataset_rele(ds, FTAG); 979 return (err); 980} 981 982int 983dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok, 984 boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff, 985#ifdef illumos 986 vnode_t *vp, offset_t *off) 987#else 988 struct file *fp, offset_t *off) 989#endif 990{ 991 dsl_pool_t *dp; 992 dsl_dataset_t *ds; 993 int err; 994 boolean_t owned = B_FALSE; 995 996 if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL) 997 return (SET_ERROR(EINVAL)); 998 999 err = dsl_pool_hold(tosnap, FTAG, &dp); 1000 if (err != 0) 1001 return (err); 1002 1003 if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) { 1004 /* 1005 * We are sending a filesystem or volume. Ensure 1006 * that it doesn't change by owning the dataset. 1007 */ 1008 err = dsl_dataset_own(dp, tosnap, FTAG, &ds); 1009 owned = B_TRUE; 1010 } else { 1011 err = dsl_dataset_hold(dp, tosnap, FTAG, &ds); 1012 } 1013 if (err != 0) { 1014 dsl_pool_rele(dp, FTAG); 1015 return (err); 1016 } 1017 1018 if (fromsnap != NULL) { 1019 zfs_bookmark_phys_t zb; 1020 boolean_t is_clone = B_FALSE; 1021 int fsnamelen = strchr(tosnap, '@') - tosnap; 1022 1023 /* 1024 * If the fromsnap is in a different filesystem, then 1025 * mark the send stream as a clone. 1026 */ 1027 if (strncmp(tosnap, fromsnap, fsnamelen) != 0 || 1028 (fromsnap[fsnamelen] != '@' && 1029 fromsnap[fsnamelen] != '#')) { 1030 is_clone = B_TRUE; 1031 } 1032 1033 if (strchr(fromsnap, '@')) { 1034 dsl_dataset_t *fromds; 1035 err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds); 1036 if (err == 0) { 1037 if (!dsl_dataset_is_before(ds, fromds, 0)) 1038 err = SET_ERROR(EXDEV); 1039 zb.zbm_creation_time = 1040 dsl_dataset_phys(fromds)->ds_creation_time; 1041 zb.zbm_creation_txg = 1042 dsl_dataset_phys(fromds)->ds_creation_txg; 1043 zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid; 1044 is_clone = (ds->ds_dir != fromds->ds_dir); 1045 dsl_dataset_rele(fromds, FTAG); 1046 } 1047 } else { 1048 err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb); 1049 } 1050 if (err != 0) { 1051 dsl_dataset_rele(ds, FTAG); 1052 dsl_pool_rele(dp, FTAG); 1053 return (err); 1054 } 1055 err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone, 1056 embedok, large_block_ok, 1057 outfd, resumeobj, resumeoff, fp, off); 1058 } else { 1059 err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE, 1060 embedok, large_block_ok, 1061 outfd, resumeobj, resumeoff, fp, off); 1062 } 1063 if (owned) 1064 dsl_dataset_disown(ds, FTAG); 1065 else 1066 dsl_dataset_rele(ds, FTAG); 1067 return (err); 1068} 1069 1070static int 1071dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size, 1072 uint64_t *sizep) 1073{ 1074 int err; 1075 /* 1076 * Assume that space (both on-disk and in-stream) is dominated by 1077 * data. We will adjust for indirect blocks and the copies property, 1078 * but ignore per-object space used (eg, dnodes and DRR_OBJECT records). 1079 */ 1080 1081 /* 1082 * Subtract out approximate space used by indirect blocks. 1083 * Assume most space is used by data blocks (non-indirect, non-dnode). 1084 * Assume all blocks are recordsize. Assume ditto blocks and 1085 * internal fragmentation counter out compression. 1086 * 1087 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per 1088 * block, which we observe in practice. 1089 */ 1090 uint64_t recordsize; 1091 err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize); 1092 if (err != 0) 1093 return (err); 1094 size -= size / recordsize * sizeof (blkptr_t); 1095 1096 /* Add in the space for the record associated with each block. */ 1097 size += size / recordsize * sizeof (dmu_replay_record_t); 1098 1099 *sizep = size; 1100 1101 return (0); 1102} 1103 1104int 1105dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep) 1106{ 1107 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1108 int err; 1109 uint64_t size; 1110 1111 ASSERT(dsl_pool_config_held(dp)); 1112 1113 /* tosnap must be a snapshot */ 1114 if (!ds->ds_is_snapshot) 1115 return (SET_ERROR(EINVAL)); 1116 1117 /* fromsnap, if provided, must be a snapshot */ 1118 if (fromds != NULL && !fromds->ds_is_snapshot) 1119 return (SET_ERROR(EINVAL)); 1120 1121 /* 1122 * fromsnap must be an earlier snapshot from the same fs as tosnap, 1123 * or the origin's fs. 1124 */ 1125 if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0)) 1126 return (SET_ERROR(EXDEV)); 1127 1128 /* Get uncompressed size estimate of changed data. */ 1129 if (fromds == NULL) { 1130 size = dsl_dataset_phys(ds)->ds_uncompressed_bytes; 1131 } else { 1132 uint64_t used, comp; 1133 err = dsl_dataset_space_written(fromds, ds, 1134 &used, &comp, &size); 1135 if (err != 0) 1136 return (err); 1137 } 1138 1139 err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep); 1140 return (err); 1141} 1142 1143/* 1144 * Simple callback used to traverse the blocks of a snapshot and sum their 1145 * uncompressed size 1146 */ 1147/* ARGSUSED */ 1148static int 1149dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, 1150 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg) 1151{ 1152 uint64_t *spaceptr = arg; 1153 if (bp != NULL && !BP_IS_HOLE(bp)) { 1154 *spaceptr += BP_GET_UCSIZE(bp); 1155 } 1156 return (0); 1157} 1158 1159/* 1160 * Given a desination snapshot and a TXG, calculate the approximate size of a 1161 * send stream sent from that TXG. from_txg may be zero, indicating that the 1162 * whole snapshot will be sent. 1163 */ 1164int 1165dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg, 1166 uint64_t *sizep) 1167{ 1168 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1169 int err; 1170 uint64_t size = 0; 1171 1172 ASSERT(dsl_pool_config_held(dp)); 1173 1174 /* tosnap must be a snapshot */ 1175 if (!dsl_dataset_is_snapshot(ds)) 1176 return (SET_ERROR(EINVAL)); 1177 1178 /* verify that from_txg is before the provided snapshot was taken */ 1179 if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) { 1180 return (SET_ERROR(EXDEV)); 1181 } 1182 1183 /* 1184 * traverse the blocks of the snapshot with birth times after 1185 * from_txg, summing their uncompressed size 1186 */ 1187 err = traverse_dataset(ds, from_txg, TRAVERSE_POST, 1188 dmu_calculate_send_traversal, &size); 1189 if (err) 1190 return (err); 1191 1192 err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep); 1193 return (err); 1194} 1195 1196typedef struct dmu_recv_begin_arg { 1197 const char *drba_origin; 1198 dmu_recv_cookie_t *drba_cookie; 1199 cred_t *drba_cred; 1200 uint64_t drba_snapobj; 1201} dmu_recv_begin_arg_t; 1202 1203static int 1204recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds, 1205 uint64_t fromguid) 1206{ 1207 uint64_t val; 1208 int error; 1209 dsl_pool_t *dp = ds->ds_dir->dd_pool; 1210 1211 /* temporary clone name must not exist */ 1212 error = zap_lookup(dp->dp_meta_objset, 1213 dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name, 1214 8, 1, &val); 1215 if (error != ENOENT) 1216 return (error == 0 ? EBUSY : error); 1217 1218 /* new snapshot name must not exist */ 1219 error = zap_lookup(dp->dp_meta_objset, 1220 dsl_dataset_phys(ds)->ds_snapnames_zapobj, 1221 drba->drba_cookie->drc_tosnap, 8, 1, &val); 1222 if (error != ENOENT) 1223 return (error == 0 ? EEXIST : error); 1224 1225 /* 1226 * Check snapshot limit before receiving. We'll recheck again at the 1227 * end, but might as well abort before receiving if we're already over 1228 * the limit. 1229 * 1230 * Note that we do not check the file system limit with 1231 * dsl_dir_fscount_check because the temporary %clones don't count 1232 * against that limit. 1233 */ 1234 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT, 1235 NULL, drba->drba_cred); 1236 if (error != 0) 1237 return (error); 1238 1239 if (fromguid != 0) { 1240 dsl_dataset_t *snap; 1241 uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj; 1242 1243 /* Find snapshot in this dir that matches fromguid. */ 1244 while (obj != 0) { 1245 error = dsl_dataset_hold_obj(dp, obj, FTAG, 1246 &snap); 1247 if (error != 0) 1248 return (SET_ERROR(ENODEV)); 1249 if (snap->ds_dir != ds->ds_dir) { 1250 dsl_dataset_rele(snap, FTAG); 1251 return (SET_ERROR(ENODEV)); 1252 } 1253 if (dsl_dataset_phys(snap)->ds_guid == fromguid) 1254 break; 1255 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 1256 dsl_dataset_rele(snap, FTAG); 1257 } 1258 if (obj == 0) 1259 return (SET_ERROR(ENODEV)); 1260 1261 if (drba->drba_cookie->drc_force) { 1262 drba->drba_snapobj = obj; 1263 } else { 1264 /* 1265 * If we are not forcing, there must be no 1266 * changes since fromsnap. 1267 */ 1268 if (dsl_dataset_modified_since_snap(ds, snap)) { 1269 dsl_dataset_rele(snap, FTAG); 1270 return (SET_ERROR(ETXTBSY)); 1271 } 1272 drba->drba_snapobj = ds->ds_prev->ds_object; 1273 } 1274 1275 dsl_dataset_rele(snap, FTAG); 1276 } else { 1277 /* if full, then must be forced */ 1278 if (!drba->drba_cookie->drc_force) 1279 return (SET_ERROR(EEXIST)); 1280 /* start from $ORIGIN@$ORIGIN, if supported */ 1281 drba->drba_snapobj = dp->dp_origin_snap != NULL ? 1282 dp->dp_origin_snap->ds_object : 0; 1283 } 1284 1285 return (0); 1286 1287} 1288 1289static int 1290dmu_recv_begin_check(void *arg, dmu_tx_t *tx) 1291{ 1292 dmu_recv_begin_arg_t *drba = arg; 1293 dsl_pool_t *dp = dmu_tx_pool(tx); 1294 struct drr_begin *drrb = drba->drba_cookie->drc_drrb; 1295 uint64_t fromguid = drrb->drr_fromguid; 1296 int flags = drrb->drr_flags; 1297 int error; 1298 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); 1299 dsl_dataset_t *ds; 1300 const char *tofs = drba->drba_cookie->drc_tofs; 1301 1302 /* already checked */ 1303 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); 1304 ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING)); 1305 1306 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == 1307 DMU_COMPOUNDSTREAM || 1308 drrb->drr_type >= DMU_OST_NUMTYPES || 1309 ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL)) 1310 return (SET_ERROR(EINVAL)); 1311 1312 /* Verify pool version supports SA if SA_SPILL feature set */ 1313 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) && 1314 spa_version(dp->dp_spa) < SPA_VERSION_SA) 1315 return (SET_ERROR(ENOTSUP)); 1316 1317 if (drba->drba_cookie->drc_resumable && 1318 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET)) 1319 return (SET_ERROR(ENOTSUP)); 1320 1321 /* 1322 * The receiving code doesn't know how to translate a WRITE_EMBEDDED 1323 * record to a plan WRITE record, so the pool must have the 1324 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED 1325 * records. Same with WRITE_EMBEDDED records that use LZ4 compression. 1326 */ 1327 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) && 1328 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) 1329 return (SET_ERROR(ENOTSUP)); 1330 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) && 1331 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) 1332 return (SET_ERROR(ENOTSUP)); 1333 1334 /* 1335 * The receiving code doesn't know how to translate large blocks 1336 * to smaller ones, so the pool must have the LARGE_BLOCKS 1337 * feature enabled if the stream has LARGE_BLOCKS. 1338 */ 1339 if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) && 1340 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS)) 1341 return (SET_ERROR(ENOTSUP)); 1342 1343 error = dsl_dataset_hold(dp, tofs, FTAG, &ds); 1344 if (error == 0) { 1345 /* target fs already exists; recv into temp clone */ 1346 1347 /* Can't recv a clone into an existing fs */ 1348 if (flags & DRR_FLAG_CLONE || drba->drba_origin) { 1349 dsl_dataset_rele(ds, FTAG); 1350 return (SET_ERROR(EINVAL)); 1351 } 1352 1353 error = recv_begin_check_existing_impl(drba, ds, fromguid); 1354 dsl_dataset_rele(ds, FTAG); 1355 } else if (error == ENOENT) { 1356 /* target fs does not exist; must be a full backup or clone */ 1357 char buf[ZFS_MAX_DATASET_NAME_LEN]; 1358 1359 /* 1360 * If it's a non-clone incremental, we are missing the 1361 * target fs, so fail the recv. 1362 */ 1363 if (fromguid != 0 && !(flags & DRR_FLAG_CLONE || 1364 drba->drba_origin)) 1365 return (SET_ERROR(ENOENT)); 1366 1367 /* 1368 * If we're receiving a full send as a clone, and it doesn't 1369 * contain all the necessary free records and freeobject 1370 * records, reject it. 1371 */ 1372 if (fromguid == 0 && drba->drba_origin && 1373 !(flags & DRR_FLAG_FREERECORDS)) 1374 return (SET_ERROR(EINVAL)); 1375 1376 /* Open the parent of tofs */ 1377 ASSERT3U(strlen(tofs), <, sizeof (buf)); 1378 (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1); 1379 error = dsl_dataset_hold(dp, buf, FTAG, &ds); 1380 if (error != 0) 1381 return (error); 1382 1383 /* 1384 * Check filesystem and snapshot limits before receiving. We'll 1385 * recheck snapshot limits again at the end (we create the 1386 * filesystems and increment those counts during begin_sync). 1387 */ 1388 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, 1389 ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred); 1390 if (error != 0) { 1391 dsl_dataset_rele(ds, FTAG); 1392 return (error); 1393 } 1394 1395 error = dsl_fs_ss_limit_check(ds->ds_dir, 1, 1396 ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred); 1397 if (error != 0) { 1398 dsl_dataset_rele(ds, FTAG); 1399 return (error); 1400 } 1401 1402 if (drba->drba_origin != NULL) { 1403 dsl_dataset_t *origin; 1404 error = dsl_dataset_hold(dp, drba->drba_origin, 1405 FTAG, &origin); 1406 if (error != 0) { 1407 dsl_dataset_rele(ds, FTAG); 1408 return (error); 1409 } 1410 if (!origin->ds_is_snapshot) { 1411 dsl_dataset_rele(origin, FTAG); 1412 dsl_dataset_rele(ds, FTAG); 1413 return (SET_ERROR(EINVAL)); 1414 } 1415 if (dsl_dataset_phys(origin)->ds_guid != fromguid && 1416 fromguid != 0) { 1417 dsl_dataset_rele(origin, FTAG); 1418 dsl_dataset_rele(ds, FTAG); 1419 return (SET_ERROR(ENODEV)); 1420 } 1421 dsl_dataset_rele(origin, FTAG); 1422 } 1423 dsl_dataset_rele(ds, FTAG); 1424 error = 0; 1425 } 1426 return (error); 1427} 1428 1429static void 1430dmu_recv_begin_sync(void *arg, dmu_tx_t *tx) 1431{ 1432 dmu_recv_begin_arg_t *drba = arg; 1433 dsl_pool_t *dp = dmu_tx_pool(tx); 1434 objset_t *mos = dp->dp_meta_objset; 1435 struct drr_begin *drrb = drba->drba_cookie->drc_drrb; 1436 const char *tofs = drba->drba_cookie->drc_tofs; 1437 dsl_dataset_t *ds, *newds; 1438 uint64_t dsobj; 1439 int error; 1440 uint64_t crflags = 0; 1441 1442 if (drrb->drr_flags & DRR_FLAG_CI_DATA) 1443 crflags |= DS_FLAG_CI_DATASET; 1444 1445 error = dsl_dataset_hold(dp, tofs, FTAG, &ds); 1446 if (error == 0) { 1447 /* create temporary clone */ 1448 dsl_dataset_t *snap = NULL; 1449 if (drba->drba_snapobj != 0) { 1450 VERIFY0(dsl_dataset_hold_obj(dp, 1451 drba->drba_snapobj, FTAG, &snap)); 1452 } 1453 dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name, 1454 snap, crflags, drba->drba_cred, tx); 1455 if (drba->drba_snapobj != 0) 1456 dsl_dataset_rele(snap, FTAG); 1457 dsl_dataset_rele(ds, FTAG); 1458 } else { 1459 dsl_dir_t *dd; 1460 const char *tail; 1461 dsl_dataset_t *origin = NULL; 1462 1463 VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail)); 1464 1465 if (drba->drba_origin != NULL) { 1466 VERIFY0(dsl_dataset_hold(dp, drba->drba_origin, 1467 FTAG, &origin)); 1468 } 1469 1470 /* Create new dataset. */ 1471 dsobj = dsl_dataset_create_sync(dd, 1472 strrchr(tofs, '/') + 1, 1473 origin, crflags, drba->drba_cred, tx); 1474 if (origin != NULL) 1475 dsl_dataset_rele(origin, FTAG); 1476 dsl_dir_rele(dd, FTAG); 1477 drba->drba_cookie->drc_newfs = B_TRUE; 1478 } 1479 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds)); 1480 1481 if (drba->drba_cookie->drc_resumable) { 1482 dsl_dataset_zapify(newds, tx); 1483 if (drrb->drr_fromguid != 0) { 1484 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID, 1485 8, 1, &drrb->drr_fromguid, tx)); 1486 } 1487 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID, 1488 8, 1, &drrb->drr_toguid, tx)); 1489 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME, 1490 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx)); 1491 uint64_t one = 1; 1492 uint64_t zero = 0; 1493 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT, 1494 8, 1, &one, tx)); 1495 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET, 1496 8, 1, &zero, tx)); 1497 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES, 1498 8, 1, &zero, tx)); 1499 if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) & 1500 DMU_BACKUP_FEATURE_EMBED_DATA) { 1501 VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK, 1502 8, 1, &one, tx)); 1503 } 1504 } 1505 1506 dmu_buf_will_dirty(newds->ds_dbuf, tx); 1507 dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT; 1508 1509 /* 1510 * If we actually created a non-clone, we need to create the 1511 * objset in our new dataset. 1512 */ 1513 rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG); 1514 if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) { 1515 (void) dmu_objset_create_impl(dp->dp_spa, 1516 newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx); 1517 } 1518 rrw_exit(&newds->ds_bp_rwlock, FTAG); 1519 1520 drba->drba_cookie->drc_ds = newds; 1521 1522 spa_history_log_internal_ds(newds, "receive", tx, ""); 1523} 1524 1525static int 1526dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx) 1527{ 1528 dmu_recv_begin_arg_t *drba = arg; 1529 dsl_pool_t *dp = dmu_tx_pool(tx); 1530 struct drr_begin *drrb = drba->drba_cookie->drc_drrb; 1531 int error; 1532 uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo); 1533 dsl_dataset_t *ds; 1534 const char *tofs = drba->drba_cookie->drc_tofs; 1535 1536 /* already checked */ 1537 ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC); 1538 ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING); 1539 1540 if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == 1541 DMU_COMPOUNDSTREAM || 1542 drrb->drr_type >= DMU_OST_NUMTYPES) 1543 return (SET_ERROR(EINVAL)); 1544 1545 /* Verify pool version supports SA if SA_SPILL feature set */ 1546 if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) && 1547 spa_version(dp->dp_spa) < SPA_VERSION_SA) 1548 return (SET_ERROR(ENOTSUP)); 1549 1550 /* 1551 * The receiving code doesn't know how to translate a WRITE_EMBEDDED 1552 * record to a plain WRITE record, so the pool must have the 1553 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED 1554 * records. Same with WRITE_EMBEDDED records that use LZ4 compression. 1555 */ 1556 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) && 1557 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) 1558 return (SET_ERROR(ENOTSUP)); 1559 if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) && 1560 !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) 1561 return (SET_ERROR(ENOTSUP)); 1562 1563 /* 6 extra bytes for /%recv */ 1564 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6]; 1565 1566 (void) snprintf(recvname, sizeof (recvname), "%s/%s", 1567 tofs, recv_clone_name); 1568 1569 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) { 1570 /* %recv does not exist; continue in tofs */ 1571 error = dsl_dataset_hold(dp, tofs, FTAG, &ds); 1572 if (error != 0) 1573 return (error); 1574 } 1575 1576 /* check that ds is marked inconsistent */ 1577 if (!DS_IS_INCONSISTENT(ds)) { 1578 dsl_dataset_rele(ds, FTAG); 1579 return (SET_ERROR(EINVAL)); 1580 } 1581 1582 /* check that there is resuming data, and that the toguid matches */ 1583 if (!dsl_dataset_is_zapified(ds)) { 1584 dsl_dataset_rele(ds, FTAG); 1585 return (SET_ERROR(EINVAL)); 1586 } 1587 uint64_t val; 1588 error = zap_lookup(dp->dp_meta_objset, ds->ds_object, 1589 DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val); 1590 if (error != 0 || drrb->drr_toguid != val) { 1591 dsl_dataset_rele(ds, FTAG); 1592 return (SET_ERROR(EINVAL)); 1593 } 1594 1595 /* 1596 * Check if the receive is still running. If so, it will be owned. 1597 * Note that nothing else can own the dataset (e.g. after the receive 1598 * fails) because it will be marked inconsistent. 1599 */ 1600 if (dsl_dataset_has_owner(ds)) { 1601 dsl_dataset_rele(ds, FTAG); 1602 return (SET_ERROR(EBUSY)); 1603 } 1604 1605 /* There should not be any snapshots of this fs yet. */ 1606 if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) { 1607 dsl_dataset_rele(ds, FTAG); 1608 return (SET_ERROR(EINVAL)); 1609 } 1610 1611 /* 1612 * Note: resume point will be checked when we process the first WRITE 1613 * record. 1614 */ 1615 1616 /* check that the origin matches */ 1617 val = 0; 1618 (void) zap_lookup(dp->dp_meta_objset, ds->ds_object, 1619 DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val); 1620 if (drrb->drr_fromguid != val) { 1621 dsl_dataset_rele(ds, FTAG); 1622 return (SET_ERROR(EINVAL)); 1623 } 1624 1625 dsl_dataset_rele(ds, FTAG); 1626 return (0); 1627} 1628 1629static void 1630dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx) 1631{ 1632 dmu_recv_begin_arg_t *drba = arg; 1633 dsl_pool_t *dp = dmu_tx_pool(tx); 1634 const char *tofs = drba->drba_cookie->drc_tofs; 1635 dsl_dataset_t *ds; 1636 uint64_t dsobj; 1637 /* 6 extra bytes for /%recv */ 1638 char recvname[ZFS_MAX_DATASET_NAME_LEN + 6]; 1639 1640 (void) snprintf(recvname, sizeof (recvname), "%s/%s", 1641 tofs, recv_clone_name); 1642 1643 if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) { 1644 /* %recv does not exist; continue in tofs */ 1645 VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds)); 1646 drba->drba_cookie->drc_newfs = B_TRUE; 1647 } 1648 1649 /* clear the inconsistent flag so that we can own it */ 1650 ASSERT(DS_IS_INCONSISTENT(ds)); 1651 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1652 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; 1653 dsobj = ds->ds_object; 1654 dsl_dataset_rele(ds, FTAG); 1655 1656 VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds)); 1657 1658 dmu_buf_will_dirty(ds->ds_dbuf, tx); 1659 dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT; 1660 1661 rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); 1662 ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds))); 1663 rrw_exit(&ds->ds_bp_rwlock, FTAG); 1664 1665 drba->drba_cookie->drc_ds = ds; 1666 1667 spa_history_log_internal_ds(ds, "resume receive", tx, ""); 1668} 1669 1670/* 1671 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() 1672 * succeeds; otherwise we will leak the holds on the datasets. 1673 */ 1674int 1675dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin, 1676 boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc) 1677{ 1678 dmu_recv_begin_arg_t drba = { 0 }; 1679 1680 bzero(drc, sizeof (dmu_recv_cookie_t)); 1681 drc->drc_drr_begin = drr_begin; 1682 drc->drc_drrb = &drr_begin->drr_u.drr_begin; 1683 drc->drc_tosnap = tosnap; 1684 drc->drc_tofs = tofs; 1685 drc->drc_force = force; 1686 drc->drc_resumable = resumable; 1687 drc->drc_cred = CRED(); 1688 1689 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { 1690 drc->drc_byteswap = B_TRUE; 1691 fletcher_4_incremental_byteswap(drr_begin, 1692 sizeof (dmu_replay_record_t), &drc->drc_cksum); 1693 byteswap_record(drr_begin); 1694 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) { 1695 fletcher_4_incremental_native(drr_begin, 1696 sizeof (dmu_replay_record_t), &drc->drc_cksum); 1697 } else { 1698 return (SET_ERROR(EINVAL)); 1699 } 1700 1701 drba.drba_origin = origin; 1702 drba.drba_cookie = drc; 1703 drba.drba_cred = CRED(); 1704 1705 if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) & 1706 DMU_BACKUP_FEATURE_RESUMING) { 1707 return (dsl_sync_task(tofs, 1708 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync, 1709 &drba, 5, ZFS_SPACE_CHECK_NORMAL)); 1710 } else { 1711 return (dsl_sync_task(tofs, 1712 dmu_recv_begin_check, dmu_recv_begin_sync, 1713 &drba, 5, ZFS_SPACE_CHECK_NORMAL)); 1714 } 1715} 1716 1717struct receive_record_arg { 1718 dmu_replay_record_t header; 1719 void *payload; /* Pointer to a buffer containing the payload */ 1720 /* 1721 * If the record is a write, pointer to the arc_buf_t containing the 1722 * payload. 1723 */ 1724 arc_buf_t *write_buf; 1725 int payload_size; 1726 uint64_t bytes_read; /* bytes read from stream when record created */ 1727 boolean_t eos_marker; /* Marks the end of the stream */ 1728 bqueue_node_t node; 1729}; 1730 1731struct receive_writer_arg { 1732 objset_t *os; 1733 boolean_t byteswap; 1734 bqueue_t q; 1735 1736 /* 1737 * These three args are used to signal to the main thread that we're 1738 * done. 1739 */ 1740 kmutex_t mutex; 1741 kcondvar_t cv; 1742 boolean_t done; 1743 1744 int err; 1745 /* A map from guid to dataset to help handle dedup'd streams. */ 1746 avl_tree_t *guid_to_ds_map; 1747 boolean_t resumable; 1748 uint64_t last_object, last_offset; 1749 uint64_t bytes_read; /* bytes read when current record created */ 1750}; 1751 1752struct objlist { 1753 list_t list; /* List of struct receive_objnode. */ 1754 /* 1755 * Last object looked up. Used to assert that objects are being looked 1756 * up in ascending order. 1757 */ 1758 uint64_t last_lookup; 1759}; 1760 1761struct receive_objnode { 1762 list_node_t node; 1763 uint64_t object; 1764}; 1765 1766struct receive_arg { 1767 objset_t *os; 1768 kthread_t *td; 1769 struct file *fp; 1770 uint64_t voff; /* The current offset in the stream */ 1771 uint64_t bytes_read; 1772 /* 1773 * A record that has had its payload read in, but hasn't yet been handed 1774 * off to the worker thread. 1775 */ 1776 struct receive_record_arg *rrd; 1777 /* A record that has had its header read in, but not its payload. */ 1778 struct receive_record_arg *next_rrd; 1779 zio_cksum_t cksum; 1780 zio_cksum_t prev_cksum; 1781 int err; 1782 boolean_t byteswap; 1783 /* Sorted list of objects not to issue prefetches for. */ 1784 struct objlist ignore_objlist; 1785}; 1786 1787typedef struct guid_map_entry { 1788 uint64_t guid; 1789 dsl_dataset_t *gme_ds; 1790 avl_node_t avlnode; 1791} guid_map_entry_t; 1792 1793static int 1794guid_compare(const void *arg1, const void *arg2) 1795{ 1796 const guid_map_entry_t *gmep1 = arg1; 1797 const guid_map_entry_t *gmep2 = arg2; 1798 1799 if (gmep1->guid < gmep2->guid) 1800 return (-1); 1801 else if (gmep1->guid > gmep2->guid) 1802 return (1); 1803 return (0); 1804} 1805 1806static void 1807free_guid_map_onexit(void *arg) 1808{ 1809 avl_tree_t *ca = arg; 1810 void *cookie = NULL; 1811 guid_map_entry_t *gmep; 1812 1813 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) { 1814 dsl_dataset_long_rele(gmep->gme_ds, gmep); 1815 dsl_dataset_rele(gmep->gme_ds, gmep); 1816 kmem_free(gmep, sizeof (guid_map_entry_t)); 1817 } 1818 avl_destroy(ca); 1819 kmem_free(ca, sizeof (avl_tree_t)); 1820} 1821 1822static int 1823restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off, ssize_t *resid) 1824{ 1825 struct uio auio; 1826 struct iovec aiov; 1827 int error; 1828 1829 aiov.iov_base = buf; 1830 aiov.iov_len = len; 1831 auio.uio_iov = &aiov; 1832 auio.uio_iovcnt = 1; 1833 auio.uio_resid = len; 1834 auio.uio_segflg = UIO_SYSSPACE; 1835 auio.uio_rw = UIO_READ; 1836 auio.uio_offset = off; 1837 auio.uio_td = ra->td; 1838#ifdef _KERNEL 1839 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td); 1840#else 1841 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__); 1842 error = EOPNOTSUPP; 1843#endif 1844 *resid = auio.uio_resid; 1845 return (error); 1846} 1847 1848static int 1849receive_read(struct receive_arg *ra, int len, void *buf) 1850{ 1851 int done = 0; 1852 1853 /* 1854 * The code doesn't rely on this (lengths being multiples of 8). See 1855 * comment in dump_bytes. 1856 */ 1857 ASSERT0(len % 8); 1858 1859 while (done < len) { 1860 ssize_t resid; 1861 1862 ra->err = restore_bytes(ra, buf + done, 1863 len - done, ra->voff, &resid); 1864 1865 if (resid == len - done) { 1866 /* 1867 * Note: ECKSUM indicates that the receive 1868 * was interrupted and can potentially be resumed. 1869 */ 1870 ra->err = SET_ERROR(ECKSUM); 1871 } 1872 ra->voff += len - done - resid; 1873 done = len - resid; 1874 if (ra->err != 0) 1875 return (ra->err); 1876 } 1877 1878 ra->bytes_read += len; 1879 1880 ASSERT3U(done, ==, len); 1881 return (0); 1882} 1883 1884static void 1885byteswap_record(dmu_replay_record_t *drr) 1886{ 1887#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) 1888#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) 1889 drr->drr_type = BSWAP_32(drr->drr_type); 1890 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); 1891 1892 switch (drr->drr_type) { 1893 case DRR_BEGIN: 1894 DO64(drr_begin.drr_magic); 1895 DO64(drr_begin.drr_versioninfo); 1896 DO64(drr_begin.drr_creation_time); 1897 DO32(drr_begin.drr_type); 1898 DO32(drr_begin.drr_flags); 1899 DO64(drr_begin.drr_toguid); 1900 DO64(drr_begin.drr_fromguid); 1901 break; 1902 case DRR_OBJECT: 1903 DO64(drr_object.drr_object); 1904 DO32(drr_object.drr_type); 1905 DO32(drr_object.drr_bonustype); 1906 DO32(drr_object.drr_blksz); 1907 DO32(drr_object.drr_bonuslen); 1908 DO64(drr_object.drr_toguid); 1909 break; 1910 case DRR_FREEOBJECTS: 1911 DO64(drr_freeobjects.drr_firstobj); 1912 DO64(drr_freeobjects.drr_numobjs); 1913 DO64(drr_freeobjects.drr_toguid); 1914 break; 1915 case DRR_WRITE: 1916 DO64(drr_write.drr_object); 1917 DO32(drr_write.drr_type); 1918 DO64(drr_write.drr_offset); 1919 DO64(drr_write.drr_length); 1920 DO64(drr_write.drr_toguid); 1921 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum); 1922 DO64(drr_write.drr_key.ddk_prop); 1923 break; 1924 case DRR_WRITE_BYREF: 1925 DO64(drr_write_byref.drr_object); 1926 DO64(drr_write_byref.drr_offset); 1927 DO64(drr_write_byref.drr_length); 1928 DO64(drr_write_byref.drr_toguid); 1929 DO64(drr_write_byref.drr_refguid); 1930 DO64(drr_write_byref.drr_refobject); 1931 DO64(drr_write_byref.drr_refoffset); 1932 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref. 1933 drr_key.ddk_cksum); 1934 DO64(drr_write_byref.drr_key.ddk_prop); 1935 break; 1936 case DRR_WRITE_EMBEDDED: 1937 DO64(drr_write_embedded.drr_object); 1938 DO64(drr_write_embedded.drr_offset); 1939 DO64(drr_write_embedded.drr_length); 1940 DO64(drr_write_embedded.drr_toguid); 1941 DO32(drr_write_embedded.drr_lsize); 1942 DO32(drr_write_embedded.drr_psize); 1943 break; 1944 case DRR_FREE: 1945 DO64(drr_free.drr_object); 1946 DO64(drr_free.drr_offset); 1947 DO64(drr_free.drr_length); 1948 DO64(drr_free.drr_toguid); 1949 break; 1950 case DRR_SPILL: 1951 DO64(drr_spill.drr_object); 1952 DO64(drr_spill.drr_length); 1953 DO64(drr_spill.drr_toguid); 1954 break; 1955 case DRR_END: 1956 DO64(drr_end.drr_toguid); 1957 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum); 1958 break; 1959 } 1960 1961 if (drr->drr_type != DRR_BEGIN) { 1962 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum); 1963 } 1964 1965#undef DO64 1966#undef DO32 1967} 1968 1969static inline uint8_t 1970deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size) 1971{ 1972 if (bonus_type == DMU_OT_SA) { 1973 return (1); 1974 } else { 1975 return (1 + 1976 ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT)); 1977 } 1978} 1979 1980static void 1981save_resume_state(struct receive_writer_arg *rwa, 1982 uint64_t object, uint64_t offset, dmu_tx_t *tx) 1983{ 1984 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 1985 1986 if (!rwa->resumable) 1987 return; 1988 1989 /* 1990 * We use ds_resume_bytes[] != 0 to indicate that we need to 1991 * update this on disk, so it must not be 0. 1992 */ 1993 ASSERT(rwa->bytes_read != 0); 1994 1995 /* 1996 * We only resume from write records, which have a valid 1997 * (non-meta-dnode) object number. 1998 */ 1999 ASSERT(object != 0); 2000 2001 /* 2002 * For resuming to work correctly, we must receive records in order, 2003 * sorted by object,offset. This is checked by the callers, but 2004 * assert it here for good measure. 2005 */ 2006 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]); 2007 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] || 2008 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]); 2009 ASSERT3U(rwa->bytes_read, >=, 2010 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]); 2011 2012 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object; 2013 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset; 2014 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read; 2015} 2016 2017static int 2018receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, 2019 void *data) 2020{ 2021 dmu_object_info_t doi; 2022 dmu_tx_t *tx; 2023 uint64_t object; 2024 int err; 2025 2026 if (drro->drr_type == DMU_OT_NONE || 2027 !DMU_OT_IS_VALID(drro->drr_type) || 2028 !DMU_OT_IS_VALID(drro->drr_bonustype) || 2029 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || 2030 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || 2031 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || 2032 drro->drr_blksz < SPA_MINBLOCKSIZE || 2033 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) || 2034 drro->drr_bonuslen > DN_MAX_BONUSLEN) { 2035 return (SET_ERROR(EINVAL)); 2036 } 2037 2038 err = dmu_object_info(rwa->os, drro->drr_object, &doi); 2039 2040 if (err != 0 && err != ENOENT) 2041 return (SET_ERROR(EINVAL)); 2042 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT; 2043 2044 /* 2045 * If we are losing blkptrs or changing the block size this must 2046 * be a new file instance. We must clear out the previous file 2047 * contents before we can change this type of metadata in the dnode. 2048 */ 2049 if (err == 0) { 2050 int nblkptr; 2051 2052 nblkptr = deduce_nblkptr(drro->drr_bonustype, 2053 drro->drr_bonuslen); 2054 2055 if (drro->drr_blksz != doi.doi_data_block_size || 2056 nblkptr < doi.doi_nblkptr) { 2057 err = dmu_free_long_range(rwa->os, drro->drr_object, 2058 0, DMU_OBJECT_END); 2059 if (err != 0) 2060 return (SET_ERROR(EINVAL)); 2061 } 2062 } 2063 2064 tx = dmu_tx_create(rwa->os); 2065 dmu_tx_hold_bonus(tx, object); 2066 err = dmu_tx_assign(tx, TXG_WAIT); 2067 if (err != 0) { 2068 dmu_tx_abort(tx); 2069 return (err); 2070 } 2071 2072 if (object == DMU_NEW_OBJECT) { 2073 /* currently free, want to be allocated */ 2074 err = dmu_object_claim(rwa->os, drro->drr_object, 2075 drro->drr_type, drro->drr_blksz, 2076 drro->drr_bonustype, drro->drr_bonuslen, tx); 2077 } else if (drro->drr_type != doi.doi_type || 2078 drro->drr_blksz != doi.doi_data_block_size || 2079 drro->drr_bonustype != doi.doi_bonus_type || 2080 drro->drr_bonuslen != doi.doi_bonus_size) { 2081 /* currently allocated, but with different properties */ 2082 err = dmu_object_reclaim(rwa->os, drro->drr_object, 2083 drro->drr_type, drro->drr_blksz, 2084 drro->drr_bonustype, drro->drr_bonuslen, tx); 2085 } 2086 if (err != 0) { 2087 dmu_tx_commit(tx); 2088 return (SET_ERROR(EINVAL)); 2089 } 2090 2091 dmu_object_set_checksum(rwa->os, drro->drr_object, 2092 drro->drr_checksumtype, tx); 2093 dmu_object_set_compress(rwa->os, drro->drr_object, 2094 drro->drr_compress, tx); 2095 2096 if (data != NULL) { 2097 dmu_buf_t *db; 2098 2099 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db)); 2100 dmu_buf_will_dirty(db, tx); 2101 2102 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); 2103 bcopy(data, db->db_data, drro->drr_bonuslen); 2104 if (rwa->byteswap) { 2105 dmu_object_byteswap_t byteswap = 2106 DMU_OT_BYTESWAP(drro->drr_bonustype); 2107 dmu_ot_byteswap[byteswap].ob_func(db->db_data, 2108 drro->drr_bonuslen); 2109 } 2110 dmu_buf_rele(db, FTAG); 2111 } 2112 dmu_tx_commit(tx); 2113 2114 return (0); 2115} 2116 2117/* ARGSUSED */ 2118static int 2119receive_freeobjects(struct receive_writer_arg *rwa, 2120 struct drr_freeobjects *drrfo) 2121{ 2122 uint64_t obj; 2123 int next_err = 0; 2124 2125 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) 2126 return (SET_ERROR(EINVAL)); 2127 2128 for (obj = drrfo->drr_firstobj; 2129 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0; 2130 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) { 2131 int err; 2132 2133 if (dmu_object_info(rwa->os, obj, NULL) != 0) 2134 continue; 2135 2136 err = dmu_free_long_object(rwa->os, obj); 2137 if (err != 0) 2138 return (err); 2139 } 2140 if (next_err != ESRCH) 2141 return (next_err); 2142 return (0); 2143} 2144 2145static int 2146receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw, 2147 arc_buf_t *abuf) 2148{ 2149 dmu_tx_t *tx; 2150 int err; 2151 2152 if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset || 2153 !DMU_OT_IS_VALID(drrw->drr_type)) 2154 return (SET_ERROR(EINVAL)); 2155 2156 /* 2157 * For resuming to work, records must be in increasing order 2158 * by (object, offset). 2159 */ 2160 if (drrw->drr_object < rwa->last_object || 2161 (drrw->drr_object == rwa->last_object && 2162 drrw->drr_offset < rwa->last_offset)) { 2163 return (SET_ERROR(EINVAL)); 2164 } 2165 rwa->last_object = drrw->drr_object; 2166 rwa->last_offset = drrw->drr_offset; 2167 2168 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0) 2169 return (SET_ERROR(EINVAL)); 2170 2171 tx = dmu_tx_create(rwa->os); 2172 2173 dmu_tx_hold_write(tx, drrw->drr_object, 2174 drrw->drr_offset, drrw->drr_length); 2175 err = dmu_tx_assign(tx, TXG_WAIT); 2176 if (err != 0) { 2177 dmu_tx_abort(tx); 2178 return (err); 2179 } 2180 if (rwa->byteswap) { 2181 dmu_object_byteswap_t byteswap = 2182 DMU_OT_BYTESWAP(drrw->drr_type); 2183 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data, 2184 drrw->drr_length); 2185 } 2186 2187 dmu_buf_t *bonus; 2188 if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) 2189 return (SET_ERROR(EINVAL)); 2190 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx); 2191 2192 /* 2193 * Note: If the receive fails, we want the resume stream to start 2194 * with the same record that we last successfully received (as opposed 2195 * to the next record), so that we can verify that we are 2196 * resuming from the correct location. 2197 */ 2198 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx); 2199 dmu_tx_commit(tx); 2200 dmu_buf_rele(bonus, FTAG); 2201 2202 return (0); 2203} 2204 2205/* 2206 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed 2207 * streams to refer to a copy of the data that is already on the 2208 * system because it came in earlier in the stream. This function 2209 * finds the earlier copy of the data, and uses that copy instead of 2210 * data from the stream to fulfill this write. 2211 */ 2212static int 2213receive_write_byref(struct receive_writer_arg *rwa, 2214 struct drr_write_byref *drrwbr) 2215{ 2216 dmu_tx_t *tx; 2217 int err; 2218 guid_map_entry_t gmesrch; 2219 guid_map_entry_t *gmep; 2220 avl_index_t where; 2221 objset_t *ref_os = NULL; 2222 dmu_buf_t *dbp; 2223 2224 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset) 2225 return (SET_ERROR(EINVAL)); 2226 2227 /* 2228 * If the GUID of the referenced dataset is different from the 2229 * GUID of the target dataset, find the referenced dataset. 2230 */ 2231 if (drrwbr->drr_toguid != drrwbr->drr_refguid) { 2232 gmesrch.guid = drrwbr->drr_refguid; 2233 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch, 2234 &where)) == NULL) { 2235 return (SET_ERROR(EINVAL)); 2236 } 2237 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os)) 2238 return (SET_ERROR(EINVAL)); 2239 } else { 2240 ref_os = rwa->os; 2241 } 2242 2243 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject, 2244 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH); 2245 if (err != 0) 2246 return (err); 2247 2248 tx = dmu_tx_create(rwa->os); 2249 2250 dmu_tx_hold_write(tx, drrwbr->drr_object, 2251 drrwbr->drr_offset, drrwbr->drr_length); 2252 err = dmu_tx_assign(tx, TXG_WAIT); 2253 if (err != 0) { 2254 dmu_tx_abort(tx); 2255 return (err); 2256 } 2257 dmu_write(rwa->os, drrwbr->drr_object, 2258 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx); 2259 dmu_buf_rele(dbp, FTAG); 2260 2261 /* See comment in restore_write. */ 2262 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx); 2263 dmu_tx_commit(tx); 2264 return (0); 2265} 2266 2267static int 2268receive_write_embedded(struct receive_writer_arg *rwa, 2269 struct drr_write_embedded *drrwe, void *data) 2270{ 2271 dmu_tx_t *tx; 2272 int err; 2273 2274 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset) 2275 return (EINVAL); 2276 2277 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE) 2278 return (EINVAL); 2279 2280 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES) 2281 return (EINVAL); 2282 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS) 2283 return (EINVAL); 2284 2285 tx = dmu_tx_create(rwa->os); 2286 2287 dmu_tx_hold_write(tx, drrwe->drr_object, 2288 drrwe->drr_offset, drrwe->drr_length); 2289 err = dmu_tx_assign(tx, TXG_WAIT); 2290 if (err != 0) { 2291 dmu_tx_abort(tx); 2292 return (err); 2293 } 2294 2295 dmu_write_embedded(rwa->os, drrwe->drr_object, 2296 drrwe->drr_offset, data, drrwe->drr_etype, 2297 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize, 2298 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx); 2299 2300 /* See comment in restore_write. */ 2301 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx); 2302 dmu_tx_commit(tx); 2303 return (0); 2304} 2305 2306static int 2307receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs, 2308 void *data) 2309{ 2310 dmu_tx_t *tx; 2311 dmu_buf_t *db, *db_spill; 2312 int err; 2313 2314 if (drrs->drr_length < SPA_MINBLOCKSIZE || 2315 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os))) 2316 return (SET_ERROR(EINVAL)); 2317 2318 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0) 2319 return (SET_ERROR(EINVAL)); 2320 2321 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db)); 2322 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) { 2323 dmu_buf_rele(db, FTAG); 2324 return (err); 2325 } 2326 2327 tx = dmu_tx_create(rwa->os); 2328 2329 dmu_tx_hold_spill(tx, db->db_object); 2330 2331 err = dmu_tx_assign(tx, TXG_WAIT); 2332 if (err != 0) { 2333 dmu_buf_rele(db, FTAG); 2334 dmu_buf_rele(db_spill, FTAG); 2335 dmu_tx_abort(tx); 2336 return (err); 2337 } 2338 dmu_buf_will_dirty(db_spill, tx); 2339 2340 if (db_spill->db_size < drrs->drr_length) 2341 VERIFY(0 == dbuf_spill_set_blksz(db_spill, 2342 drrs->drr_length, tx)); 2343 bcopy(data, db_spill->db_data, drrs->drr_length); 2344 2345 dmu_buf_rele(db, FTAG); 2346 dmu_buf_rele(db_spill, FTAG); 2347 2348 dmu_tx_commit(tx); 2349 return (0); 2350} 2351 2352/* ARGSUSED */ 2353static int 2354receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf) 2355{ 2356 int err; 2357 2358 if (drrf->drr_length != -1ULL && 2359 drrf->drr_offset + drrf->drr_length < drrf->drr_offset) 2360 return (SET_ERROR(EINVAL)); 2361 2362 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0) 2363 return (SET_ERROR(EINVAL)); 2364 2365 err = dmu_free_long_range(rwa->os, drrf->drr_object, 2366 drrf->drr_offset, drrf->drr_length); 2367 2368 return (err); 2369} 2370 2371/* used to destroy the drc_ds on error */ 2372static void 2373dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc) 2374{ 2375 if (drc->drc_resumable) { 2376 /* wait for our resume state to be written to disk */ 2377 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0); 2378 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 2379 } else { 2380 char name[ZFS_MAX_DATASET_NAME_LEN]; 2381 dsl_dataset_name(drc->drc_ds, name); 2382 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 2383 (void) dsl_destroy_head(name); 2384 } 2385} 2386 2387static void 2388receive_cksum(struct receive_arg *ra, int len, void *buf) 2389{ 2390 if (ra->byteswap) { 2391 fletcher_4_incremental_byteswap(buf, len, &ra->cksum); 2392 } else { 2393 fletcher_4_incremental_native(buf, len, &ra->cksum); 2394 } 2395} 2396 2397/* 2398 * Read the payload into a buffer of size len, and update the current record's 2399 * payload field. 2400 * Allocate ra->next_rrd and read the next record's header into 2401 * ra->next_rrd->header. 2402 * Verify checksum of payload and next record. 2403 */ 2404static int 2405receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf) 2406{ 2407 int err; 2408 2409 if (len != 0) { 2410 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE); 2411 err = receive_read(ra, len, buf); 2412 if (err != 0) 2413 return (err); 2414 receive_cksum(ra, len, buf); 2415 2416 /* note: rrd is NULL when reading the begin record's payload */ 2417 if (ra->rrd != NULL) { 2418 ra->rrd->payload = buf; 2419 ra->rrd->payload_size = len; 2420 ra->rrd->bytes_read = ra->bytes_read; 2421 } 2422 } 2423 2424 ra->prev_cksum = ra->cksum; 2425 2426 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); 2427 err = receive_read(ra, sizeof (ra->next_rrd->header), 2428 &ra->next_rrd->header); 2429 ra->next_rrd->bytes_read = ra->bytes_read; 2430 if (err != 0) { 2431 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2432 ra->next_rrd = NULL; 2433 return (err); 2434 } 2435 if (ra->next_rrd->header.drr_type == DRR_BEGIN) { 2436 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2437 ra->next_rrd = NULL; 2438 return (SET_ERROR(EINVAL)); 2439 } 2440 2441 /* 2442 * Note: checksum is of everything up to but not including the 2443 * checksum itself. 2444 */ 2445 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2446 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); 2447 receive_cksum(ra, 2448 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2449 &ra->next_rrd->header); 2450 2451 zio_cksum_t cksum_orig = 2452 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2453 zio_cksum_t *cksump = 2454 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2455 2456 if (ra->byteswap) 2457 byteswap_record(&ra->next_rrd->header); 2458 2459 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) && 2460 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) { 2461 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2462 ra->next_rrd = NULL; 2463 return (SET_ERROR(ECKSUM)); 2464 } 2465 2466 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig); 2467 2468 return (0); 2469} 2470 2471static void 2472objlist_create(struct objlist *list) 2473{ 2474 list_create(&list->list, sizeof (struct receive_objnode), 2475 offsetof(struct receive_objnode, node)); 2476 list->last_lookup = 0; 2477} 2478 2479static void 2480objlist_destroy(struct objlist *list) 2481{ 2482 for (struct receive_objnode *n = list_remove_head(&list->list); 2483 n != NULL; n = list_remove_head(&list->list)) { 2484 kmem_free(n, sizeof (*n)); 2485 } 2486 list_destroy(&list->list); 2487} 2488 2489/* 2490 * This function looks through the objlist to see if the specified object number 2491 * is contained in the objlist. In the process, it will remove all object 2492 * numbers in the list that are smaller than the specified object number. Thus, 2493 * any lookup of an object number smaller than a previously looked up object 2494 * number will always return false; therefore, all lookups should be done in 2495 * ascending order. 2496 */ 2497static boolean_t 2498objlist_exists(struct objlist *list, uint64_t object) 2499{ 2500 struct receive_objnode *node = list_head(&list->list); 2501 ASSERT3U(object, >=, list->last_lookup); 2502 list->last_lookup = object; 2503 while (node != NULL && node->object < object) { 2504 VERIFY3P(node, ==, list_remove_head(&list->list)); 2505 kmem_free(node, sizeof (*node)); 2506 node = list_head(&list->list); 2507 } 2508 return (node != NULL && node->object == object); 2509} 2510 2511/* 2512 * The objlist is a list of object numbers stored in ascending order. However, 2513 * the insertion of new object numbers does not seek out the correct location to 2514 * store a new object number; instead, it appends it to the list for simplicity. 2515 * Thus, any users must take care to only insert new object numbers in ascending 2516 * order. 2517 */ 2518static void 2519objlist_insert(struct objlist *list, uint64_t object) 2520{ 2521 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP); 2522 node->object = object; 2523#ifdef ZFS_DEBUG 2524 struct receive_objnode *last_object = list_tail(&list->list); 2525 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0); 2526 ASSERT3U(node->object, >, last_objnum); 2527#endif 2528 list_insert_tail(&list->list, node); 2529} 2530 2531/* 2532 * Issue the prefetch reads for any necessary indirect blocks. 2533 * 2534 * We use the object ignore list to tell us whether or not to issue prefetches 2535 * for a given object. We do this for both correctness (in case the blocksize 2536 * of an object has changed) and performance (if the object doesn't exist, don't 2537 * needlessly try to issue prefetches). We also trim the list as we go through 2538 * the stream to prevent it from growing to an unbounded size. 2539 * 2540 * The object numbers within will always be in sorted order, and any write 2541 * records we see will also be in sorted order, but they're not sorted with 2542 * respect to each other (i.e. we can get several object records before 2543 * receiving each object's write records). As a result, once we've reached a 2544 * given object number, we can safely remove any reference to lower object 2545 * numbers in the ignore list. In practice, we receive up to 32 object records 2546 * before receiving write records, so the list can have up to 32 nodes in it. 2547 */ 2548/* ARGSUSED */ 2549static void 2550receive_read_prefetch(struct receive_arg *ra, 2551 uint64_t object, uint64_t offset, uint64_t length) 2552{ 2553 if (!objlist_exists(&ra->ignore_objlist, object)) { 2554 dmu_prefetch(ra->os, object, 1, offset, length, 2555 ZIO_PRIORITY_SYNC_READ); 2556 } 2557} 2558 2559/* 2560 * Read records off the stream, issuing any necessary prefetches. 2561 */ 2562static int 2563receive_read_record(struct receive_arg *ra) 2564{ 2565 int err; 2566 2567 switch (ra->rrd->header.drr_type) { 2568 case DRR_OBJECT: 2569 { 2570 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object; 2571 uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8); 2572 void *buf = kmem_zalloc(size, KM_SLEEP); 2573 dmu_object_info_t doi; 2574 err = receive_read_payload_and_next_header(ra, size, buf); 2575 if (err != 0) { 2576 kmem_free(buf, size); 2577 return (err); 2578 } 2579 err = dmu_object_info(ra->os, drro->drr_object, &doi); 2580 /* 2581 * See receive_read_prefetch for an explanation why we're 2582 * storing this object in the ignore_obj_list. 2583 */ 2584 if (err == ENOENT || 2585 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { 2586 objlist_insert(&ra->ignore_objlist, drro->drr_object); 2587 err = 0; 2588 } 2589 return (err); 2590 } 2591 case DRR_FREEOBJECTS: 2592 { 2593 err = receive_read_payload_and_next_header(ra, 0, NULL); 2594 return (err); 2595 } 2596 case DRR_WRITE: 2597 { 2598 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write; 2599 arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os), 2600 drrw->drr_length); 2601 2602 err = receive_read_payload_and_next_header(ra, 2603 drrw->drr_length, abuf->b_data); 2604 if (err != 0) { 2605 dmu_return_arcbuf(abuf); 2606 return (err); 2607 } 2608 ra->rrd->write_buf = abuf; 2609 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset, 2610 drrw->drr_length); 2611 return (err); 2612 } 2613 case DRR_WRITE_BYREF: 2614 { 2615 struct drr_write_byref *drrwb = 2616 &ra->rrd->header.drr_u.drr_write_byref; 2617 err = receive_read_payload_and_next_header(ra, 0, NULL); 2618 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset, 2619 drrwb->drr_length); 2620 return (err); 2621 } 2622 case DRR_WRITE_EMBEDDED: 2623 { 2624 struct drr_write_embedded *drrwe = 2625 &ra->rrd->header.drr_u.drr_write_embedded; 2626 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8); 2627 void *buf = kmem_zalloc(size, KM_SLEEP); 2628 2629 err = receive_read_payload_and_next_header(ra, size, buf); 2630 if (err != 0) { 2631 kmem_free(buf, size); 2632 return (err); 2633 } 2634 2635 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset, 2636 drrwe->drr_length); 2637 return (err); 2638 } 2639 case DRR_FREE: 2640 { 2641 /* 2642 * It might be beneficial to prefetch indirect blocks here, but 2643 * we don't really have the data to decide for sure. 2644 */ 2645 err = receive_read_payload_and_next_header(ra, 0, NULL); 2646 return (err); 2647 } 2648 case DRR_END: 2649 { 2650 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end; 2651 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum)) 2652 return (SET_ERROR(ECKSUM)); 2653 return (0); 2654 } 2655 case DRR_SPILL: 2656 { 2657 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill; 2658 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP); 2659 err = receive_read_payload_and_next_header(ra, drrs->drr_length, 2660 buf); 2661 if (err != 0) 2662 kmem_free(buf, drrs->drr_length); 2663 return (err); 2664 } 2665 default: 2666 return (SET_ERROR(EINVAL)); 2667 } 2668} 2669 2670/* 2671 * Commit the records to the pool. 2672 */ 2673static int 2674receive_process_record(struct receive_writer_arg *rwa, 2675 struct receive_record_arg *rrd) 2676{ 2677 int err; 2678 2679 /* Processing in order, therefore bytes_read should be increasing. */ 2680 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read); 2681 rwa->bytes_read = rrd->bytes_read; 2682 2683 switch (rrd->header.drr_type) { 2684 case DRR_OBJECT: 2685 { 2686 struct drr_object *drro = &rrd->header.drr_u.drr_object; 2687 err = receive_object(rwa, drro, rrd->payload); 2688 kmem_free(rrd->payload, rrd->payload_size); 2689 rrd->payload = NULL; 2690 return (err); 2691 } 2692 case DRR_FREEOBJECTS: 2693 { 2694 struct drr_freeobjects *drrfo = 2695 &rrd->header.drr_u.drr_freeobjects; 2696 return (receive_freeobjects(rwa, drrfo)); 2697 } 2698 case DRR_WRITE: 2699 { 2700 struct drr_write *drrw = &rrd->header.drr_u.drr_write; 2701 err = receive_write(rwa, drrw, rrd->write_buf); 2702 /* if receive_write() is successful, it consumes the arc_buf */ 2703 if (err != 0) 2704 dmu_return_arcbuf(rrd->write_buf); 2705 rrd->write_buf = NULL; 2706 rrd->payload = NULL; 2707 return (err); 2708 } 2709 case DRR_WRITE_BYREF: 2710 { 2711 struct drr_write_byref *drrwbr = 2712 &rrd->header.drr_u.drr_write_byref; 2713 return (receive_write_byref(rwa, drrwbr)); 2714 } 2715 case DRR_WRITE_EMBEDDED: 2716 { 2717 struct drr_write_embedded *drrwe = 2718 &rrd->header.drr_u.drr_write_embedded; 2719 err = receive_write_embedded(rwa, drrwe, rrd->payload); 2720 kmem_free(rrd->payload, rrd->payload_size); 2721 rrd->payload = NULL; 2722 return (err); 2723 } 2724 case DRR_FREE: 2725 { 2726 struct drr_free *drrf = &rrd->header.drr_u.drr_free; 2727 return (receive_free(rwa, drrf)); 2728 } 2729 case DRR_SPILL: 2730 { 2731 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; 2732 err = receive_spill(rwa, drrs, rrd->payload); 2733 kmem_free(rrd->payload, rrd->payload_size); 2734 rrd->payload = NULL; 2735 return (err); 2736 } 2737 default: 2738 return (SET_ERROR(EINVAL)); 2739 } 2740} 2741 2742/* 2743 * dmu_recv_stream's worker thread; pull records off the queue, and then call 2744 * receive_process_record When we're done, signal the main thread and exit. 2745 */ 2746static void 2747receive_writer_thread(void *arg) 2748{ 2749 struct receive_writer_arg *rwa = arg; 2750 struct receive_record_arg *rrd; 2751 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker; 2752 rrd = bqueue_dequeue(&rwa->q)) { 2753 /* 2754 * If there's an error, the main thread will stop putting things 2755 * on the queue, but we need to clear everything in it before we 2756 * can exit. 2757 */ 2758 if (rwa->err == 0) { 2759 rwa->err = receive_process_record(rwa, rrd); 2760 } else if (rrd->write_buf != NULL) { 2761 dmu_return_arcbuf(rrd->write_buf); 2762 rrd->write_buf = NULL; 2763 rrd->payload = NULL; 2764 } else if (rrd->payload != NULL) { 2765 kmem_free(rrd->payload, rrd->payload_size); 2766 rrd->payload = NULL; 2767 } 2768 kmem_free(rrd, sizeof (*rrd)); 2769 } 2770 kmem_free(rrd, sizeof (*rrd)); 2771 mutex_enter(&rwa->mutex); 2772 rwa->done = B_TRUE; 2773 cv_signal(&rwa->cv); 2774 mutex_exit(&rwa->mutex); 2775 thread_exit(); 2776} 2777 2778static int 2779resume_check(struct receive_arg *ra, nvlist_t *begin_nvl) 2780{ 2781 uint64_t val; 2782 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset; 2783 uint64_t dsobj = dmu_objset_id(ra->os); 2784 uint64_t resume_obj, resume_off; 2785 2786 if (nvlist_lookup_uint64(begin_nvl, 2787 "resume_object", &resume_obj) != 0 || 2788 nvlist_lookup_uint64(begin_nvl, 2789 "resume_offset", &resume_off) != 0) { 2790 return (SET_ERROR(EINVAL)); 2791 } 2792 VERIFY0(zap_lookup(mos, dsobj, 2793 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); 2794 if (resume_obj != val) 2795 return (SET_ERROR(EINVAL)); 2796 VERIFY0(zap_lookup(mos, dsobj, 2797 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); 2798 if (resume_off != val) 2799 return (SET_ERROR(EINVAL)); 2800 2801 return (0); 2802} 2803 2804/* 2805 * Read in the stream's records, one by one, and apply them to the pool. There 2806 * are two threads involved; the thread that calls this function will spin up a 2807 * worker thread, read the records off the stream one by one, and issue 2808 * prefetches for any necessary indirect blocks. It will then push the records 2809 * onto an internal blocking queue. The worker thread will pull the records off 2810 * the queue, and actually write the data into the DMU. This way, the worker 2811 * thread doesn't have to wait for reads to complete, since everything it needs 2812 * (the indirect blocks) will be prefetched. 2813 * 2814 * NB: callers *must* call dmu_recv_end() if this succeeds. 2815 */ 2816int 2817dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp, 2818 int cleanup_fd, uint64_t *action_handlep) 2819{ 2820 int err = 0; 2821 struct receive_arg ra = { 0 }; 2822 struct receive_writer_arg rwa = { 0 }; 2823 int featureflags; 2824 nvlist_t *begin_nvl = NULL; 2825 2826 ra.byteswap = drc->drc_byteswap; 2827 ra.cksum = drc->drc_cksum; 2828 ra.td = curthread; 2829 ra.fp = fp; 2830 ra.voff = *voffp; 2831 2832 if (dsl_dataset_is_zapified(drc->drc_ds)) { 2833 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, 2834 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, 2835 sizeof (ra.bytes_read), 1, &ra.bytes_read); 2836 } 2837 2838 objlist_create(&ra.ignore_objlist); 2839 2840 /* these were verified in dmu_recv_begin */ 2841 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, 2842 DMU_SUBSTREAM); 2843 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); 2844 2845 /* 2846 * Open the objset we are modifying. 2847 */ 2848 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os)); 2849 2850 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT); 2851 2852 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo); 2853 2854 /* if this stream is dedup'ed, set up the avl tree for guid mapping */ 2855 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) { 2856 minor_t minor; 2857 2858 if (cleanup_fd == -1) { 2859 ra.err = SET_ERROR(EBADF); 2860 goto out; 2861 } 2862 ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor); 2863 if (ra.err != 0) { 2864 cleanup_fd = -1; 2865 goto out; 2866 } 2867 2868 if (*action_handlep == 0) { 2869 rwa.guid_to_ds_map = 2870 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP); 2871 avl_create(rwa.guid_to_ds_map, guid_compare, 2872 sizeof (guid_map_entry_t), 2873 offsetof(guid_map_entry_t, avlnode)); 2874 err = zfs_onexit_add_cb(minor, 2875 free_guid_map_onexit, rwa.guid_to_ds_map, 2876 action_handlep); 2877 if (ra.err != 0) 2878 goto out; 2879 } else { 2880 err = zfs_onexit_cb_data(minor, *action_handlep, 2881 (void **)&rwa.guid_to_ds_map); 2882 if (ra.err != 0) 2883 goto out; 2884 } 2885 2886 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map; 2887 } 2888 2889 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen; 2890 void *payload = NULL; 2891 if (payloadlen != 0) 2892 payload = kmem_alloc(payloadlen, KM_SLEEP); 2893 2894 err = receive_read_payload_and_next_header(&ra, payloadlen, payload); 2895 if (err != 0) { 2896 if (payloadlen != 0) 2897 kmem_free(payload, payloadlen); 2898 goto out; 2899 } 2900 if (payloadlen != 0) { 2901 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP); 2902 kmem_free(payload, payloadlen); 2903 if (err != 0) 2904 goto out; 2905 } 2906 2907 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) { 2908 err = resume_check(&ra, begin_nvl); 2909 if (err != 0) 2910 goto out; 2911 } 2912 2913 (void) bqueue_init(&rwa.q, zfs_recv_queue_length, 2914 offsetof(struct receive_record_arg, node)); 2915 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL); 2916 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL); 2917 rwa.os = ra.os; 2918 rwa.byteswap = drc->drc_byteswap; 2919 rwa.resumable = drc->drc_resumable; 2920 2921 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0, 2922 TS_RUN, minclsyspri); 2923 /* 2924 * We're reading rwa.err without locks, which is safe since we are the 2925 * only reader, and the worker thread is the only writer. It's ok if we 2926 * miss a write for an iteration or two of the loop, since the writer 2927 * thread will keep freeing records we send it until we send it an eos 2928 * marker. 2929 * 2930 * We can leave this loop in 3 ways: First, if rwa.err is 2931 * non-zero. In that case, the writer thread will free the rrd we just 2932 * pushed. Second, if we're interrupted; in that case, either it's the 2933 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd 2934 * has been handed off to the writer thread who will free it. Finally, 2935 * if receive_read_record fails or we're at the end of the stream, then 2936 * we free ra.rrd and exit. 2937 */ 2938 while (rwa.err == 0) { 2939 if (issig(JUSTLOOKING) && issig(FORREAL)) { 2940 err = SET_ERROR(EINTR); 2941 break; 2942 } 2943 2944 ASSERT3P(ra.rrd, ==, NULL); 2945 ra.rrd = ra.next_rrd; 2946 ra.next_rrd = NULL; 2947 /* Allocates and loads header into ra.next_rrd */ 2948 err = receive_read_record(&ra); 2949 2950 if (ra.rrd->header.drr_type == DRR_END || err != 0) { 2951 kmem_free(ra.rrd, sizeof (*ra.rrd)); 2952 ra.rrd = NULL; 2953 break; 2954 } 2955 2956 bqueue_enqueue(&rwa.q, ra.rrd, 2957 sizeof (struct receive_record_arg) + ra.rrd->payload_size); 2958 ra.rrd = NULL; 2959 } 2960 if (ra.next_rrd == NULL) 2961 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP); 2962 ra.next_rrd->eos_marker = B_TRUE; 2963 bqueue_enqueue(&rwa.q, ra.next_rrd, 1); 2964 2965 mutex_enter(&rwa.mutex); 2966 while (!rwa.done) { 2967 cv_wait(&rwa.cv, &rwa.mutex); 2968 } 2969 mutex_exit(&rwa.mutex); 2970 2971 cv_destroy(&rwa.cv); 2972 mutex_destroy(&rwa.mutex); 2973 bqueue_destroy(&rwa.q); 2974 if (err == 0) 2975 err = rwa.err; 2976 2977out: 2978 nvlist_free(begin_nvl); 2979 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) 2980 zfs_onexit_fd_rele(cleanup_fd); 2981 2982 if (err != 0) { 2983 /* 2984 * Clean up references. If receive is not resumable, 2985 * destroy what we created, so we don't leave it in 2986 * the inconsistent state. 2987 */ 2988 dmu_recv_cleanup_ds(drc); 2989 } 2990 2991 *voffp = ra.voff; 2992 objlist_destroy(&ra.ignore_objlist); 2993 return (err); 2994} 2995 2996static int 2997dmu_recv_end_check(void *arg, dmu_tx_t *tx) 2998{ 2999 dmu_recv_cookie_t *drc = arg; 3000 dsl_pool_t *dp = dmu_tx_pool(tx); 3001 int error; 3002 3003 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); 3004 3005 if (!drc->drc_newfs) { 3006 dsl_dataset_t *origin_head; 3007 3008 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); 3009 if (error != 0) 3010 return (error); 3011 if (drc->drc_force) { 3012 /* 3013 * We will destroy any snapshots in tofs (i.e. before 3014 * origin_head) that are after the origin (which is 3015 * the snap before drc_ds, because drc_ds can not 3016 * have any snaps of its own). 3017 */ 3018 uint64_t obj; 3019 3020 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3021 while (obj != 3022 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3023 dsl_dataset_t *snap; 3024 error = dsl_dataset_hold_obj(dp, obj, FTAG, 3025 &snap); 3026 if (error != 0) 3027 break; 3028 if (snap->ds_dir != origin_head->ds_dir) 3029 error = SET_ERROR(EINVAL); 3030 if (error == 0) { 3031 error = dsl_destroy_snapshot_check_impl( 3032 snap, B_FALSE); 3033 } 3034 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3035 dsl_dataset_rele(snap, FTAG); 3036 if (error != 0) 3037 break; 3038 } 3039 if (error != 0) { 3040 dsl_dataset_rele(origin_head, FTAG); 3041 return (error); 3042 } 3043 } 3044 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, 3045 origin_head, drc->drc_force, drc->drc_owner, tx); 3046 if (error != 0) { 3047 dsl_dataset_rele(origin_head, FTAG); 3048 return (error); 3049 } 3050 error = dsl_dataset_snapshot_check_impl(origin_head, 3051 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 3052 dsl_dataset_rele(origin_head, FTAG); 3053 if (error != 0) 3054 return (error); 3055 3056 error = dsl_destroy_head_check_impl(drc->drc_ds, 1); 3057 } else { 3058 error = dsl_dataset_snapshot_check_impl(drc->drc_ds, 3059 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 3060 } 3061 return (error); 3062} 3063 3064static void 3065dmu_recv_end_sync(void *arg, dmu_tx_t *tx) 3066{ 3067 dmu_recv_cookie_t *drc = arg; 3068 dsl_pool_t *dp = dmu_tx_pool(tx); 3069 3070 spa_history_log_internal_ds(drc->drc_ds, "finish receiving", 3071 tx, "snap=%s", drc->drc_tosnap); 3072 3073 if (!drc->drc_newfs) { 3074 dsl_dataset_t *origin_head; 3075 3076 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, 3077 &origin_head)); 3078 3079 if (drc->drc_force) { 3080 /* 3081 * Destroy any snapshots of drc_tofs (origin_head) 3082 * after the origin (the snap before drc_ds). 3083 */ 3084 uint64_t obj; 3085 3086 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3087 while (obj != 3088 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3089 dsl_dataset_t *snap; 3090 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, 3091 &snap)); 3092 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); 3093 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3094 dsl_destroy_snapshot_sync_impl(snap, 3095 B_FALSE, tx); 3096 dsl_dataset_rele(snap, FTAG); 3097 } 3098 } 3099 VERIFY3P(drc->drc_ds->ds_prev, ==, 3100 origin_head->ds_prev); 3101 3102 dsl_dataset_clone_swap_sync_impl(drc->drc_ds, 3103 origin_head, tx); 3104 dsl_dataset_snapshot_sync_impl(origin_head, 3105 drc->drc_tosnap, tx); 3106 3107 /* set snapshot's creation time and guid */ 3108 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); 3109 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = 3110 drc->drc_drrb->drr_creation_time; 3111 dsl_dataset_phys(origin_head->ds_prev)->ds_guid = 3112 drc->drc_drrb->drr_toguid; 3113 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= 3114 ~DS_FLAG_INCONSISTENT; 3115 3116 dmu_buf_will_dirty(origin_head->ds_dbuf, tx); 3117 dsl_dataset_phys(origin_head)->ds_flags &= 3118 ~DS_FLAG_INCONSISTENT; 3119 3120 drc->drc_newsnapobj = 3121 dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3122 3123 dsl_dataset_rele(origin_head, FTAG); 3124 dsl_destroy_head_sync_impl(drc->drc_ds, tx); 3125 3126 if (drc->drc_owner != NULL) 3127 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); 3128 } else { 3129 dsl_dataset_t *ds = drc->drc_ds; 3130 3131 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); 3132 3133 /* set snapshot's creation time and guid */ 3134 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 3135 dsl_dataset_phys(ds->ds_prev)->ds_creation_time = 3136 drc->drc_drrb->drr_creation_time; 3137 dsl_dataset_phys(ds->ds_prev)->ds_guid = 3138 drc->drc_drrb->drr_toguid; 3139 dsl_dataset_phys(ds->ds_prev)->ds_flags &= 3140 ~DS_FLAG_INCONSISTENT; 3141 3142 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3143 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; 3144 if (dsl_dataset_has_resume_receive_state(ds)) { 3145 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3146 DS_FIELD_RESUME_FROMGUID, tx); 3147 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3148 DS_FIELD_RESUME_OBJECT, tx); 3149 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3150 DS_FIELD_RESUME_OFFSET, tx); 3151 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3152 DS_FIELD_RESUME_BYTES, tx); 3153 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3154 DS_FIELD_RESUME_TOGUID, tx); 3155 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3156 DS_FIELD_RESUME_TONAME, tx); 3157 } 3158 drc->drc_newsnapobj = 3159 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; 3160 } 3161 /* 3162 * Release the hold from dmu_recv_begin. This must be done before 3163 * we return to open context, so that when we free the dataset's dnode, 3164 * we can evict its bonus buffer. 3165 */ 3166 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 3167 drc->drc_ds = NULL; 3168} 3169 3170static int 3171add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj) 3172{ 3173 dsl_pool_t *dp; 3174 dsl_dataset_t *snapds; 3175 guid_map_entry_t *gmep; 3176 int err; 3177 3178 ASSERT(guid_map != NULL); 3179 3180 err = dsl_pool_hold(name, FTAG, &dp); 3181 if (err != 0) 3182 return (err); 3183 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP); 3184 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds); 3185 if (err == 0) { 3186 gmep->guid = dsl_dataset_phys(snapds)->ds_guid; 3187 gmep->gme_ds = snapds; 3188 avl_add(guid_map, gmep); 3189 dsl_dataset_long_hold(snapds, gmep); 3190 } else 3191 kmem_free(gmep, sizeof (*gmep)); 3192 3193 dsl_pool_rele(dp, FTAG); 3194 return (err); 3195} 3196 3197static int dmu_recv_end_modified_blocks = 3; 3198 3199static int 3200dmu_recv_existing_end(dmu_recv_cookie_t *drc) 3201{ 3202#ifdef _KERNEL 3203 /* 3204 * We will be destroying the ds; make sure its origin is unmounted if 3205 * necessary. 3206 */ 3207 char name[ZFS_MAX_DATASET_NAME_LEN]; 3208 dsl_dataset_name(drc->drc_ds, name); 3209 zfs_destroy_unmount_origin(name); 3210#endif 3211 3212 return (dsl_sync_task(drc->drc_tofs, 3213 dmu_recv_end_check, dmu_recv_end_sync, drc, 3214 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); 3215} 3216 3217static int 3218dmu_recv_new_end(dmu_recv_cookie_t *drc) 3219{ 3220 return (dsl_sync_task(drc->drc_tofs, 3221 dmu_recv_end_check, dmu_recv_end_sync, drc, 3222 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); 3223} 3224 3225int 3226dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) 3227{ 3228 int error; 3229 3230 drc->drc_owner = owner; 3231 3232 if (drc->drc_newfs) 3233 error = dmu_recv_new_end(drc); 3234 else 3235 error = dmu_recv_existing_end(drc); 3236 3237 if (error != 0) { 3238 dmu_recv_cleanup_ds(drc); 3239 } else if (drc->drc_guid_to_ds_map != NULL) { 3240 (void) add_ds_to_guidmap(drc->drc_tofs, 3241 drc->drc_guid_to_ds_map, 3242 drc->drc_newsnapobj); 3243 } 3244 return (error); 3245} 3246 3247/* 3248 * Return TRUE if this objset is currently being received into. 3249 */ 3250boolean_t 3251dmu_objset_is_receiving(objset_t *os) 3252{ 3253 return (os->os_dsl_dataset != NULL && 3254 os->os_dsl_dataset->ds_owner == dmu_recv_tag); 3255} 3256