dmu_send.c revision 352376
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright 2016 RackTop Systems.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>

#ifdef __FreeBSD__
#undef dump_write
#define	dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;

#ifdef _KERNEL
TUNABLE_INT("vfs.zfs.send_set_freerecords_bit", &zfs_send_set_freerecords_bit);
#endif

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

/*
 * Use this to override the recordsize calculation for fast zfs send estimates.
 */
uint64_t zfs_override_estimate_recordsize = 0;

#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
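/*
 * For example, with 128K data blocks (datablkszsec == 256 sectors of
 * 512 bytes) and 128K indirect blocks (indblkshift == 17), each
 * indirect block holds 2^(17 - SPA_BLKPTRSHIFT) == 1024 block
 * pointers, so:
 *
 *	BP_SPAN(256, 17, 0) == 128K	(one data block)
 *	BP_SPAN(256, 17, 1) == 128M	(one level-1 indirect block)
 *	BP_SPAN(256, 17, 2) == 128G	(one level-2 indirect block)
 */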

static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
	bqueue_t q;
	dsl_dataset_t *ds;	/* Dataset to traverse */
	uint64_t fromtxg;	/* Traverse from this txg */
	int flags;		/* flags to pass to traverse_dataset */
	int error_code;
	boolean_t cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t eos_marker;	/* Marks the end of the stream */
	blkptr_t bp;
	zbookmark_phys_t zb;
	uint8_t indblkshift;
	uint16_t datablkszsec;
	bqueue_node_t ln;
};

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	struct uio auio;
	struct iovec aiov;

	/*
	 * The code does not rely on this (len being a multiple of 8).  We keep
	 * this assertion because of the corresponding assertion in
	 * receive_read().  Keeping this assertion ensures that we do not
	 * inadvertently break backwards compatibility (causing the assertion
	 * in receive_read() to trigger on old software).
	 *
	 * Removing the assertions could be rolled into a new feature that uses
	 * data that isn't 8-byte aligned; if the assertions were removed, a
	 * feature flag would have to be added.
	 */

	ASSERT0(len % 8);

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	(void) fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
		dsp->dsa_sent_begin = B_TRUE;
	} else {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	if (dsp->dsa_drr->drr_type == DRR_END) {
		dsp->dsa_sent_end = B_TRUE;
	}
	(void) fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		(void) fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
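/*
 * The resulting stream is a sequence of fixed-size dmu_replay_record_t
 * headers, each optionally followed by a payload, roughly:
 *
 *	BEGIN [nvlist] | OBJECT bonus | WRITE data | FREE | ... | END
 *
 * Every record other than BEGIN (whose checksum field is left zero)
 * carries the running fletcher4 checksum of everything sent before its
 * own checksum field, which is what lets the receiver verify the
 * stream incrementally.
 */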

/*
 * Fill in the drr_free struct, or perform aggregation if the previous record is
 * also a free record, and the two are adjacent.
 *
 * Note that we send free records even for a full send, because we want to be
 * able to receive a full send as a clone, which requires a list of all the free
 * and freeobject records that were generated on the source.
 */
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
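/*
 * For example, back-to-back calls dump_free(dsp, 5, 0, 128K) and
 * dump_free(dsp, 5, 128K, 128K) leave a single pending DRR_FREE for
 * (object 5, offset 0, length 256K); the record is only emitted once a
 * non-adjacent free or an operation of another type comes along.
 */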

static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int lsize, int psize, const blkptr_t *bp,
    void *data)
{
	uint64_t payload_size;
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + lsize - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_logical_size = lsize;

	/* only set the compression fields if the buf is compressed */
	if (lsize != psize) {
		ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED);
		ASSERT(!BP_IS_EMBEDDED(bp));
		ASSERT(!BP_SHOULD_BYTESWAP(bp));
		ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
		ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
		ASSERT3S(psize, >, 0);
		ASSERT3S(lsize, >=, psize);

		drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
		drrw->drr_compressed_size = psize;
		payload_size = drrw->drr_compressed_size;
	} else {
		payload_size = drrw->drr_logical_size;
	}

	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, payload_size) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (EINTR);
	return (0);
}

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}
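/*
 * A spill block holds system-attribute data that did not fit in the
 * dnode's bonus buffer, so there is at most one per object and it is
 * sent as a single indivisible DRR_SPILL record rather than being
 * subject to the aggregation done for free records above.
 */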

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}
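/*
 * Put differently: an embedded block is sent as DRR_WRITE_EMBEDDED
 * only if the stream carries DMU_BACKUP_FEATURE_EMBED_DATA (and
 * DMU_BACKUP_FEATURE_LZ4 when the embedded payload uses a non-legacy
 * compression function); otherwise do_dump() falls back to an ordinary
 * DRR_WRITE of the decoded block.
 */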

/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}

/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End
 * of Stream record when the traverse_dataset call has finished.  If there is
 * no dataset to traverse, the thread immediately pushes the End of Stream
 * marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	thread_exit();
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		arc_buf_destroy(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		arc_buf_destroy(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		/*
		 * If we have large blocks stored on disk but the send flags
		 * don't allow us to send large blocks, we split the data from
		 * the arc buf into chunks.
		 */
		boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
		    !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
		/*
		 * We should only request compressed data from the ARC if all
		 * the following are true:
		 *  - stream compression was requested
		 *  - we aren't splitting large blocks into smaller chunks
		 *  - the data won't need to be byteswapped before sending
		 *  - this isn't an embedded block
		 *  - this isn't metadata (if receiving on a different endian
		 *    system it can be byteswapped more easily)
		 */
		boolean_t request_compressed =
		    (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
		    !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
		    !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));

		enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
		if (request_compressed)
			zioflags |= ZIO_FLAG_RAW;
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
				    blksz);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (split_large_blocks) {
			ASSERT3U(arc_get_compression(abuf), ==,
			    ZIO_COMPRESS_OFF);
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object, offset,
			    blksz, arc_buf_size(abuf), bp, abuf->b_data);
		}
		arc_buf_destroy(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}
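/*
 * Taken together, the send side is a two-thread pipeline:
 * send_traverse_thread() walks the dataset and enqueues one
 * send_block_record per block pointer into a bqueue bounded by
 * zfs_send_queue_length, while the original thread dequeues records in
 * dmu_send_impl() below and converts each one into stream records via
 * do_dump().  The eos_marker record is how the traversal's completion
 * is handed to the consumer.
 */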

/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
    int outfd, uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg = { 0 };

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}
	if (compressok) {
		featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
	}
	if ((featureflags &
	    (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED)) !=
	    0 && spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
		featureflags |= DMU_BACKUP_FEATURE_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
	if (zfs_send_set_freerecords_bit)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	void *payload = NULL;
	size_t payload_len = 0;
	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvlist_t *nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}
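/*
 * Note that for a resuming send, the BEGIN record is the only one with
 * a payload: the packed nvlist of resume_object/resume_offset built
 * above, which the receiving side can check against the resume state
 * it saved in the partially received dataset.
 */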

int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok, outfd, 0, 0, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, boolean_t compressok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, compressok,
		    outfd, resumeobj, resumeoff, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}

static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds,
    uint64_t uncompressed, uint64_t compressed, boolean_t stream_compressed,
    uint64_t *sizep)
{
	int err = 0;
	uint64_t size;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */
	uint64_t recordsize;
	uint64_t record_count;
	objset_t *os;
	VERIFY0(dmu_objset_from_ds(ds, &os));

	/* Assume all (uncompressed) blocks are recordsize. */
	if (zfs_override_estimate_recordsize != 0) {
		recordsize = zfs_override_estimate_recordsize;
	} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
	} else {
		err = dsl_prop_get_int_ds(ds,
		    zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
	}
	if (err != 0)
		return (err);
	record_count = uncompressed / recordsize;

	/*
	 * If we're estimating a send size for a compressed stream, use the
	 * compressed data size to estimate the stream size.  Otherwise, use
	 * the uncompressed data size.
	 */
	size = stream_compressed ? compressed : uncompressed;

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume no ditto blocks or internal fragmentation.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block.
	 */
	size -= record_count * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += record_count * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
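/*
 * For example, 1 GiB of uncompressed data at the default 128K
 * recordsize gives record_count == 8192; the estimate then subtracts
 * 8192 * sizeof (blkptr_t) (128 bytes each, i.e. 1 MiB) of indirect
 * overhead and adds one fixed-size stream header back per record.
 */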

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
    boolean_t stream_compressed, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t uncomp, comp;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get compressed and uncompressed size estimates of changed data. */
	if (fromds == NULL) {
		uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
		comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
	} else {
		uint64_t used;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &uncomp);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
	    stream_compressed, sizep);
	/*
	 * Add the size of the BEGIN and END records to the estimate.
	 */
	*sizep += 2 * sizeof (dmu_replay_record_t);
	return (err);
}

struct calculate_send_arg {
	uint64_t uncompressed;
	uint64_t compressed;
};

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed and compressed sizes.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct calculate_send_arg *space = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		space->uncompressed += BP_GET_UCSIZE(bp);
		space->compressed += BP_GET_PSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG.  from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    boolean_t stream_compressed, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	struct calculate_send_arg size = { 0 };

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed and compressed sizes
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
	    size.compressed, stream_compressed, sizep);
	return (err);
}

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* Temporary clone name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EBUSY) : error);

	/* Resume state must not be set. */
	if (dsl_dataset_has_resume_receive_state(ds))
		return (SET_ERROR(EBUSY));

	/* New snapshot name must not exist. */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? SET_ERROR(EEXIST) : error);

	/*
	 * Check snapshot limit before receiving.  We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}
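/*
 * Recall that an incremental receive into an existing filesystem is
 * staged in a temporary clone named <fs>/%recv (recv_clone_name) that
 * is only swapped into place when the receive completes, which is why
 * the checks above refuse to start while such a clone, or saved resume
 * state, already exists.
 */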

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[ZFS_MAX_DATASET_NAME_LEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/*
		 * If we're receiving a full send as a clone, and it doesn't
		 * contain all the necessary free records and freeobject
		 * records, reject it.
		 */
		if (fromguid == 0 && drba->drba_origin &&
		    !(flags & DRR_FLAG_FREERECORDS))
			return (SET_ERROR(EINVAL));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, sizeof (buf));
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving.  We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
			    fromguid != 0) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_COMPRESSED) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}
	rrw_exit(&newds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}
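/*
 * The DS_FIELD_RESUME_* entries written above live in the dataset's
 * ZAP, so they survive reboots; they are what the user-visible
 * receive_resume_token is derived from, allowing "zfs send -t" to
 * restart the stream at the saved object/offset.
 */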

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;
	/* 6 extra bytes for /%recv */
	char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));
	rrw_exit(&ds->ds_bp_rwlock, FTAG);

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}
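/*
 * The flag dance above is needed because a dataset marked
 * DS_FLAG_INCONSISTENT cannot be owned: the flag is cleared just long
 * enough to take ownership for the resumed receive, then set again so
 * that everything else keeps treating the dataset as incomplete.
 */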
1787 */ 1788int 1789dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin, 1790 boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc) 1791{ 1792 dmu_recv_begin_arg_t drba = { 0 }; 1793 1794 bzero(drc, sizeof (dmu_recv_cookie_t)); 1795 drc->drc_drr_begin = drr_begin; 1796 drc->drc_drrb = &drr_begin->drr_u.drr_begin; 1797 drc->drc_tosnap = tosnap; 1798 drc->drc_tofs = tofs; 1799 drc->drc_force = force; 1800 drc->drc_resumable = resumable; 1801 drc->drc_cred = CRED(); 1802 drc->drc_clone = (origin != NULL); 1803 1804 if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) { 1805 drc->drc_byteswap = B_TRUE; 1806 (void) fletcher_4_incremental_byteswap(drr_begin, 1807 sizeof (dmu_replay_record_t), &drc->drc_cksum); 1808 byteswap_record(drr_begin); 1809 } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) { 1810 (void) fletcher_4_incremental_native(drr_begin, 1811 sizeof (dmu_replay_record_t), &drc->drc_cksum); 1812 } else { 1813 return (SET_ERROR(EINVAL)); 1814 } 1815 1816 drba.drba_origin = origin; 1817 drba.drba_cookie = drc; 1818 drba.drba_cred = CRED(); 1819 1820 if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) & 1821 DMU_BACKUP_FEATURE_RESUMING) { 1822 return (dsl_sync_task(tofs, 1823 dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync, 1824 &drba, 5, ZFS_SPACE_CHECK_NORMAL)); 1825 } else { 1826 return (dsl_sync_task(tofs, 1827 dmu_recv_begin_check, dmu_recv_begin_sync, 1828 &drba, 5, ZFS_SPACE_CHECK_NORMAL)); 1829 } 1830} 1831 1832struct receive_record_arg { 1833 dmu_replay_record_t header; 1834 void *payload; /* Pointer to a buffer containing the payload */ 1835 /* 1836 * If the record is a write, pointer to the arc_buf_t containing the 1837 * payload. 1838 */ 1839 arc_buf_t *write_buf; 1840 int payload_size; 1841 uint64_t bytes_read; /* bytes read from stream when record created */ 1842 boolean_t eos_marker; /* Marks the end of the stream */ 1843 bqueue_node_t node; 1844}; 1845 1846struct receive_writer_arg { 1847 objset_t *os; 1848 boolean_t byteswap; 1849 bqueue_t q; 1850 1851 /* 1852 * These three args are used to signal to the main thread that we're 1853 * done. 1854 */ 1855 kmutex_t mutex; 1856 kcondvar_t cv; 1857 boolean_t done; 1858 1859 int err; 1860 /* A map from guid to dataset to help handle dedup'd streams. */ 1861 avl_tree_t *guid_to_ds_map; 1862 boolean_t resumable; 1863 uint64_t last_object; 1864 uint64_t last_offset; 1865 uint64_t max_object; /* highest object ID referenced in stream */ 1866 uint64_t bytes_read; /* bytes read when current record created */ 1867}; 1868 1869struct objlist { 1870 list_t list; /* List of struct receive_objnode. */ 1871 /* 1872 * Last object looked up. Used to assert that objects are being looked 1873 * up in ascending order. 1874 */ 1875 uint64_t last_lookup; 1876}; 1877 1878struct receive_objnode { 1879 list_node_t node; 1880 uint64_t object; 1881}; 1882 1883struct receive_arg { 1884 objset_t *os; 1885 kthread_t *td; 1886 struct file *fp; 1887 uint64_t voff; /* The current offset in the stream */ 1888 uint64_t bytes_read; 1889 /* 1890 * A record that has had its payload read in, but hasn't yet been handed 1891 * off to the worker thread. 1892 */ 1893 struct receive_record_arg *rrd; 1894 /* A record that has had its header read in, but not its payload. */ 1895 struct receive_record_arg *next_rrd; 1896 zio_cksum_t cksum; 1897 zio_cksum_t prev_cksum; 1898 int err; 1899 boolean_t byteswap; 1900 /* Sorted list of objects not to issue prefetches for. 
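 * See receive_read_prefetch() and the objlist helpers below for how
 * this list is consulted and trimmed.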
*/ 1901 struct objlist ignore_objlist; 1902}; 1903 1904typedef struct guid_map_entry { 1905 uint64_t guid; 1906 dsl_dataset_t *gme_ds; 1907 avl_node_t avlnode; 1908} guid_map_entry_t; 1909 1910static int 1911guid_compare(const void *arg1, const void *arg2) 1912{ 1913 const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1; 1914 const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2; 1915 1916 return (AVL_CMP(gmep1->guid, gmep2->guid)); 1917} 1918 1919static void 1920free_guid_map_onexit(void *arg) 1921{ 1922 avl_tree_t *ca = arg; 1923 void *cookie = NULL; 1924 guid_map_entry_t *gmep; 1925 1926 while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) { 1927 dsl_dataset_long_rele(gmep->gme_ds, gmep); 1928 dsl_dataset_rele(gmep->gme_ds, gmep); 1929 kmem_free(gmep, sizeof (guid_map_entry_t)); 1930 } 1931 avl_destroy(ca); 1932 kmem_free(ca, sizeof (avl_tree_t)); 1933} 1934 1935static int 1936restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off, ssize_t *resid) 1937{ 1938 struct uio auio; 1939 struct iovec aiov; 1940 int error; 1941 1942 aiov.iov_base = buf; 1943 aiov.iov_len = len; 1944 auio.uio_iov = &aiov; 1945 auio.uio_iovcnt = 1; 1946 auio.uio_resid = len; 1947 auio.uio_segflg = UIO_SYSSPACE; 1948 auio.uio_rw = UIO_READ; 1949 auio.uio_offset = off; 1950 auio.uio_td = ra->td; 1951#ifdef _KERNEL 1952 error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td); 1953#else 1954 fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__); 1955 error = EOPNOTSUPP; 1956#endif 1957 *resid = auio.uio_resid; 1958 return (error); 1959} 1960 1961static int 1962receive_read(struct receive_arg *ra, int len, void *buf) 1963{ 1964 int done = 0; 1965 1966 /* 1967 * The code doesn't rely on this (lengths being multiples of 8). See 1968 * comment in dump_bytes. 1969 */ 1970 ASSERT0(len % 8); 1971 1972 while (done < len) { 1973 ssize_t resid; 1974 1975 ra->err = restore_bytes(ra, buf + done, 1976 len - done, ra->voff, &resid); 1977 1978 if (resid == len - done) { 1979 /* 1980 * Note: ECKSUM indicates that the receive 1981 * was interrupted and can potentially be resumed. 
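 * (In restore_bytes() terms: resid == len - done below means fo_read()
 * returned no data at all, i.e. the stream ended mid-record.)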
1982 */ 1983 ra->err = SET_ERROR(ECKSUM); 1984 } 1985 ra->voff += len - done - resid; 1986 done = len - resid; 1987 if (ra->err != 0) 1988 return (ra->err); 1989 } 1990 1991 ra->bytes_read += len; 1992 1993 ASSERT3U(done, ==, len); 1994 return (0); 1995} 1996 1997static void 1998byteswap_record(dmu_replay_record_t *drr) 1999{ 2000#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X)) 2001#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X)) 2002 drr->drr_type = BSWAP_32(drr->drr_type); 2003 drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen); 2004 2005 switch (drr->drr_type) { 2006 case DRR_BEGIN: 2007 DO64(drr_begin.drr_magic); 2008 DO64(drr_begin.drr_versioninfo); 2009 DO64(drr_begin.drr_creation_time); 2010 DO32(drr_begin.drr_type); 2011 DO32(drr_begin.drr_flags); 2012 DO64(drr_begin.drr_toguid); 2013 DO64(drr_begin.drr_fromguid); 2014 break; 2015 case DRR_OBJECT: 2016 DO64(drr_object.drr_object); 2017 DO32(drr_object.drr_type); 2018 DO32(drr_object.drr_bonustype); 2019 DO32(drr_object.drr_blksz); 2020 DO32(drr_object.drr_bonuslen); 2021 DO64(drr_object.drr_toguid); 2022 break; 2023 case DRR_FREEOBJECTS: 2024 DO64(drr_freeobjects.drr_firstobj); 2025 DO64(drr_freeobjects.drr_numobjs); 2026 DO64(drr_freeobjects.drr_toguid); 2027 break; 2028 case DRR_WRITE: 2029 DO64(drr_write.drr_object); 2030 DO32(drr_write.drr_type); 2031 DO64(drr_write.drr_offset); 2032 DO64(drr_write.drr_logical_size); 2033 DO64(drr_write.drr_toguid); 2034 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum); 2035 DO64(drr_write.drr_key.ddk_prop); 2036 DO64(drr_write.drr_compressed_size); 2037 break; 2038 case DRR_WRITE_BYREF: 2039 DO64(drr_write_byref.drr_object); 2040 DO64(drr_write_byref.drr_offset); 2041 DO64(drr_write_byref.drr_length); 2042 DO64(drr_write_byref.drr_toguid); 2043 DO64(drr_write_byref.drr_refguid); 2044 DO64(drr_write_byref.drr_refobject); 2045 DO64(drr_write_byref.drr_refoffset); 2046 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref. 2047 drr_key.ddk_cksum); 2048 DO64(drr_write_byref.drr_key.ddk_prop); 2049 break; 2050 case DRR_WRITE_EMBEDDED: 2051 DO64(drr_write_embedded.drr_object); 2052 DO64(drr_write_embedded.drr_offset); 2053 DO64(drr_write_embedded.drr_length); 2054 DO64(drr_write_embedded.drr_toguid); 2055 DO32(drr_write_embedded.drr_lsize); 2056 DO32(drr_write_embedded.drr_psize); 2057 break; 2058 case DRR_FREE: 2059 DO64(drr_free.drr_object); 2060 DO64(drr_free.drr_offset); 2061 DO64(drr_free.drr_length); 2062 DO64(drr_free.drr_toguid); 2063 break; 2064 case DRR_SPILL: 2065 DO64(drr_spill.drr_object); 2066 DO64(drr_spill.drr_length); 2067 DO64(drr_spill.drr_toguid); 2068 break; 2069 case DRR_END: 2070 DO64(drr_end.drr_toguid); 2071 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum); 2072 break; 2073 } 2074 2075 if (drr->drr_type != DRR_BEGIN) { 2076 ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum); 2077 } 2078 2079#undef DO64 2080#undef DO32 2081} 2082 2083static inline uint8_t 2084deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size) 2085{ 2086 if (bonus_type == DMU_OT_SA) { 2087 return (1); 2088 } else { 2089 return (1 + 2090 ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT)); 2091 } 2092} 2093 2094static void 2095save_resume_state(struct receive_writer_arg *rwa, 2096 uint64_t object, uint64_t offset, dmu_tx_t *tx) 2097{ 2098 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK; 2099 2100 if (!rwa->resumable) 2101 return; 2102 2103 /* 2104 * We use ds_resume_bytes[] != 0 to indicate that we need to 2105 * update this on disk, so it must not be 0. 
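 * (bytes_read is primed from DS_FIELD_RESUME_BYTES in dmu_recv_stream()
 * and advanced by every receive_read(), so it is nonzero by the time
 * any write record reaches us.)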
2106 */ 2107 ASSERT(rwa->bytes_read != 0); 2108 2109 /* 2110 * We only resume from write records, which have a valid 2111 * (non-meta-dnode) object number. 2112 */ 2113 ASSERT(object != 0); 2114 2115 /* 2116 * For resuming to work correctly, we must receive records in order, 2117 * sorted by object,offset. This is checked by the callers, but 2118 * assert it here for good measure. 2119 */ 2120 ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]); 2121 ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] || 2122 offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]); 2123 ASSERT3U(rwa->bytes_read, >=, 2124 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]); 2125 2126 rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object; 2127 rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset; 2128 rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read; 2129} 2130 2131static int 2132receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, 2133 void *data) 2134{ 2135 dmu_object_info_t doi; 2136 dmu_tx_t *tx; 2137 uint64_t object; 2138 int err; 2139 2140 if (drro->drr_type == DMU_OT_NONE || 2141 !DMU_OT_IS_VALID(drro->drr_type) || 2142 !DMU_OT_IS_VALID(drro->drr_bonustype) || 2143 drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS || 2144 drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS || 2145 P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) || 2146 drro->drr_blksz < SPA_MINBLOCKSIZE || 2147 drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) || 2148 drro->drr_bonuslen > DN_MAX_BONUSLEN) { 2149 return (SET_ERROR(EINVAL)); 2150 } 2151 2152 err = dmu_object_info(rwa->os, drro->drr_object, &doi); 2153 2154 if (err != 0 && err != ENOENT) 2155 return (SET_ERROR(EINVAL)); 2156 object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT; 2157 2158 if (drro->drr_object > rwa->max_object) 2159 rwa->max_object = drro->drr_object; 2160 2161 /* 2162 * If we are losing blkptrs or changing the block size this must 2163 * be a new file instance. We must clear out the previous file 2164 * contents before we can change this type of metadata in the dnode. 
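 * (A worked example, assuming the classic 512-byte dnode where
 * DN_MAX_BONUSLEN is 320 and a blkptr_t is 128 bytes: per
 * deduce_nblkptr(), a 64-byte bonus leaves 1 + (320 - 64)/128 = 3
 * block pointers, while a 320-byte bonus leaves only 1, so growing
 * the bonus that far loses blkptrs and requires the free below.)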
2165 */ 2166 if (err == 0) { 2167 int nblkptr; 2168 2169 nblkptr = deduce_nblkptr(drro->drr_bonustype, 2170 drro->drr_bonuslen); 2171 2172 if (drro->drr_blksz != doi.doi_data_block_size || 2173 nblkptr < doi.doi_nblkptr) { 2174 err = dmu_free_long_range(rwa->os, drro->drr_object, 2175 0, DMU_OBJECT_END); 2176 if (err != 0) 2177 return (SET_ERROR(EINVAL)); 2178 } 2179 } 2180 2181 tx = dmu_tx_create(rwa->os); 2182 dmu_tx_hold_bonus(tx, object); 2183 err = dmu_tx_assign(tx, TXG_WAIT); 2184 if (err != 0) { 2185 dmu_tx_abort(tx); 2186 return (err); 2187 } 2188 2189 if (object == DMU_NEW_OBJECT) { 2190 /* currently free, want to be allocated */ 2191 err = dmu_object_claim(rwa->os, drro->drr_object, 2192 drro->drr_type, drro->drr_blksz, 2193 drro->drr_bonustype, drro->drr_bonuslen, tx); 2194 } else if (drro->drr_type != doi.doi_type || 2195 drro->drr_blksz != doi.doi_data_block_size || 2196 drro->drr_bonustype != doi.doi_bonus_type || 2197 drro->drr_bonuslen != doi.doi_bonus_size) { 2198 /* currently allocated, but with different properties */ 2199 err = dmu_object_reclaim(rwa->os, drro->drr_object, 2200 drro->drr_type, drro->drr_blksz, 2201 drro->drr_bonustype, drro->drr_bonuslen, tx); 2202 } 2203 if (err != 0) { 2204 dmu_tx_commit(tx); 2205 return (SET_ERROR(EINVAL)); 2206 } 2207 2208 dmu_object_set_checksum(rwa->os, drro->drr_object, 2209 drro->drr_checksumtype, tx); 2210 dmu_object_set_compress(rwa->os, drro->drr_object, 2211 drro->drr_compress, tx); 2212 2213 if (data != NULL) { 2214 dmu_buf_t *db; 2215 2216 VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db)); 2217 dmu_buf_will_dirty(db, tx); 2218 2219 ASSERT3U(db->db_size, >=, drro->drr_bonuslen); 2220 bcopy(data, db->db_data, drro->drr_bonuslen); 2221 if (rwa->byteswap) { 2222 dmu_object_byteswap_t byteswap = 2223 DMU_OT_BYTESWAP(drro->drr_bonustype); 2224 dmu_ot_byteswap[byteswap].ob_func(db->db_data, 2225 drro->drr_bonuslen); 2226 } 2227 dmu_buf_rele(db, FTAG); 2228 } 2229 dmu_tx_commit(tx); 2230 2231 return (0); 2232} 2233 2234/* ARGSUSED */ 2235static int 2236receive_freeobjects(struct receive_writer_arg *rwa, 2237 struct drr_freeobjects *drrfo) 2238{ 2239 uint64_t obj; 2240 int next_err = 0; 2241 2242 if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj) 2243 return (SET_ERROR(EINVAL)); 2244 2245 for (obj = drrfo->drr_firstobj; 2246 obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0; 2247 next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) { 2248 int err; 2249 2250 if (dmu_object_info(rwa->os, obj, NULL) != 0) 2251 continue; 2252 2253 err = dmu_free_long_object(rwa->os, obj); 2254 if (err != 0) 2255 return (err); 2256 2257 if (obj > rwa->max_object) 2258 rwa->max_object = obj; 2259 } 2260 if (next_err != ESRCH) 2261 return (next_err); 2262 return (0); 2263} 2264 2265static int 2266receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw, 2267 arc_buf_t *abuf) 2268{ 2269 dmu_tx_t *tx; 2270 int err; 2271 2272 if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset || 2273 !DMU_OT_IS_VALID(drrw->drr_type)) 2274 return (SET_ERROR(EINVAL)); 2275 2276 /* 2277 * For resuming to work, records must be in increasing order 2278 * by (object, offset). 
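 * For example, (obj 5, off 0), (obj 5, off 131072), (obj 7, off 0) is
 * an acceptable sequence, while a record for (obj 5, off 0) arriving
 * after (obj 7, off 0) is rejected with EINVAL below.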
2279 */ 2280 if (drrw->drr_object < rwa->last_object || 2281 (drrw->drr_object == rwa->last_object && 2282 drrw->drr_offset < rwa->last_offset)) { 2283 return (SET_ERROR(EINVAL)); 2284 } 2285 rwa->last_object = drrw->drr_object; 2286 rwa->last_offset = drrw->drr_offset; 2287 2288 if (rwa->last_object > rwa->max_object) 2289 rwa->max_object = rwa->last_object; 2290 2291 if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0) 2292 return (SET_ERROR(EINVAL)); 2293 2294 tx = dmu_tx_create(rwa->os); 2295 2296 dmu_tx_hold_write(tx, drrw->drr_object, 2297 drrw->drr_offset, drrw->drr_logical_size); 2298 err = dmu_tx_assign(tx, TXG_WAIT); 2299 if (err != 0) { 2300 dmu_tx_abort(tx); 2301 return (err); 2302 } 2303 if (rwa->byteswap) { 2304 dmu_object_byteswap_t byteswap = 2305 DMU_OT_BYTESWAP(drrw->drr_type); 2306 dmu_ot_byteswap[byteswap].ob_func(abuf->b_data, 2307 DRR_WRITE_PAYLOAD_SIZE(drrw)); 2308 } 2309 2310 /* use the bonus buf to look up the dnode in dmu_assign_arcbuf */ 2311 dmu_buf_t *bonus; 2312 if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) 2313 return (SET_ERROR(EINVAL)); 2314 dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx); 2315 2316 /* 2317 * Note: If the receive fails, we want the resume stream to start 2318 * with the same record that we last successfully received (as opposed 2319 * to the next record), so that we can verify that we are 2320 * resuming from the correct location. 2321 */ 2322 save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx); 2323 dmu_tx_commit(tx); 2324 dmu_buf_rele(bonus, FTAG); 2325 2326 return (0); 2327} 2328 2329/* 2330 * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed 2331 * streams to refer to a copy of the data that is already on the 2332 * system because it came in earlier in the stream. This function 2333 * finds the earlier copy of the data, and uses that copy instead of 2334 * data from the stream to fulfill this write. 2335 */ 2336static int 2337receive_write_byref(struct receive_writer_arg *rwa, 2338 struct drr_write_byref *drrwbr) 2339{ 2340 dmu_tx_t *tx; 2341 int err; 2342 guid_map_entry_t gmesrch; 2343 guid_map_entry_t *gmep; 2344 avl_index_t where; 2345 objset_t *ref_os = NULL; 2346 dmu_buf_t *dbp; 2347 2348 if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset) 2349 return (SET_ERROR(EINVAL)); 2350 2351 /* 2352 * If the GUID of the referenced dataset is different from the 2353 * GUID of the target dataset, find the referenced dataset. 2354 */ 2355 if (drrwbr->drr_toguid != drrwbr->drr_refguid) { 2356 gmesrch.guid = drrwbr->drr_refguid; 2357 if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch, 2358 &where)) == NULL) { 2359 return (SET_ERROR(EINVAL)); 2360 } 2361 if (dmu_objset_from_ds(gmep->gme_ds, &ref_os)) 2362 return (SET_ERROR(EINVAL)); 2363 } else { 2364 ref_os = rwa->os; 2365 } 2366 2367 if (drrwbr->drr_object > rwa->max_object) 2368 rwa->max_object = drrwbr->drr_object; 2369 2370 err = dmu_buf_hold(ref_os, drrwbr->drr_refobject, 2371 drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH); 2372 if (err != 0) 2373 return (err); 2374 2375 tx = dmu_tx_create(rwa->os); 2376 2377 dmu_tx_hold_write(tx, drrwbr->drr_object, 2378 drrwbr->drr_offset, drrwbr->drr_length); 2379 err = dmu_tx_assign(tx, TXG_WAIT); 2380 if (err != 0) { 2381 dmu_tx_abort(tx); 2382 return (err); 2383 } 2384 dmu_write(rwa->os, drrwbr->drr_object, 2385 drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx); 2386 dmu_buf_rele(dbp, FTAG); 2387 2388 /* See comment in restore_write. 
*/
2389 save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2390 dmu_tx_commit(tx);
2391 return (0);
2392 }
2393
2394 static int
2395 receive_write_embedded(struct receive_writer_arg *rwa,
2396 struct drr_write_embedded *drrwe, void *data)
2397 {
2398 dmu_tx_t *tx;
2399 int err;
2400
2401 if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
2402 return (SET_ERROR(EINVAL));
2403
2404 if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
2405 return (SET_ERROR(EINVAL));
2406
2407 if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
2408 return (SET_ERROR(EINVAL));
2409 if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
2410 return (SET_ERROR(EINVAL));
2411
2412 if (drrwe->drr_object > rwa->max_object)
2413 rwa->max_object = drrwe->drr_object;
2414
2415 tx = dmu_tx_create(rwa->os);
2416
2417 dmu_tx_hold_write(tx, drrwe->drr_object,
2418 drrwe->drr_offset, drrwe->drr_length);
2419 err = dmu_tx_assign(tx, TXG_WAIT);
2420 if (err != 0) {
2421 dmu_tx_abort(tx);
2422 return (err);
2423 }
2424
2425 dmu_write_embedded(rwa->os, drrwe->drr_object,
2426 drrwe->drr_offset, data, drrwe->drr_etype,
2427 drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2428 rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2429
2430 /* See comment in receive_write. */
2431 save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2432 dmu_tx_commit(tx);
2433 return (0);
2434 }
2435
2436 static int
2437 receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2438 void *data)
2439 {
2440 dmu_tx_t *tx;
2441 dmu_buf_t *db, *db_spill;
2442 int err;
2443
2444 if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2445 drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2446 return (SET_ERROR(EINVAL));
2447
2448 if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2449 return (SET_ERROR(EINVAL));
2450
2451 if (drrs->drr_object > rwa->max_object)
2452 rwa->max_object = drrs->drr_object;
2453
2454 VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2455 if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2456 dmu_buf_rele(db, FTAG);
2457 return (err);
2458 }
2459
2460 tx = dmu_tx_create(rwa->os);
2461
2462 dmu_tx_hold_spill(tx, db->db_object);
2463
2464 err = dmu_tx_assign(tx, TXG_WAIT);
2465 if (err != 0) {
2466 dmu_buf_rele(db, FTAG);
2467 dmu_buf_rele(db_spill, FTAG);
2468 dmu_tx_abort(tx);
2469 return (err);
2470 }
2471 dmu_buf_will_dirty(db_spill, tx);
2472
2473 if (db_spill->db_size < drrs->drr_length)
2474 VERIFY0(dbuf_spill_set_blksz(db_spill,
2475 drrs->drr_length, tx));
2476 bcopy(data, db_spill->db_data, drrs->drr_length);
2477
2478 dmu_buf_rele(db, FTAG);
2479 dmu_buf_rele(db_spill, FTAG);
2480
2481 dmu_tx_commit(tx);
2482 return (0);
2483 }
2484
2485 /* ARGSUSED */
2486 static int
2487 receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2488 {
2489 int err;
2490
2491 if (drrf->drr_length != -1ULL &&
2492 drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2493 return (SET_ERROR(EINVAL));
2494
2495 if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2496 return (SET_ERROR(EINVAL));
2497
2498 if (drrf->drr_object > rwa->max_object)
2499 rwa->max_object = drrf->drr_object;
2500
2501 err = dmu_free_long_range(rwa->os, drrf->drr_object,
2502 drrf->drr_offset, drrf->drr_length);
2503
2504 return (err);
2505 }
2506
2507 /* used to destroy the drc_ds on error */
2508 static void
2509 dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2510 {
2511 if (drc->drc_resumable) {
2512 /* wait for our resume state to be written to disk */
2513 txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2514
dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 2515 } else { 2516 char name[ZFS_MAX_DATASET_NAME_LEN]; 2517 dsl_dataset_name(drc->drc_ds, name); 2518 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 2519 (void) dsl_destroy_head(name); 2520 } 2521} 2522 2523static void 2524receive_cksum(struct receive_arg *ra, int len, void *buf) 2525{ 2526 if (ra->byteswap) { 2527 (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum); 2528 } else { 2529 (void) fletcher_4_incremental_native(buf, len, &ra->cksum); 2530 } 2531} 2532 2533/* 2534 * Read the payload into a buffer of size len, and update the current record's 2535 * payload field. 2536 * Allocate ra->next_rrd and read the next record's header into 2537 * ra->next_rrd->header. 2538 * Verify checksum of payload and next record. 2539 */ 2540static int 2541receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf) 2542{ 2543 int err; 2544 2545 if (len != 0) { 2546 ASSERT3U(len, <=, SPA_MAXBLOCKSIZE); 2547 err = receive_read(ra, len, buf); 2548 if (err != 0) 2549 return (err); 2550 receive_cksum(ra, len, buf); 2551 2552 /* note: rrd is NULL when reading the begin record's payload */ 2553 if (ra->rrd != NULL) { 2554 ra->rrd->payload = buf; 2555 ra->rrd->payload_size = len; 2556 ra->rrd->bytes_read = ra->bytes_read; 2557 } 2558 } 2559 2560 ra->prev_cksum = ra->cksum; 2561 2562 ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP); 2563 err = receive_read(ra, sizeof (ra->next_rrd->header), 2564 &ra->next_rrd->header); 2565 ra->next_rrd->bytes_read = ra->bytes_read; 2566 if (err != 0) { 2567 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2568 ra->next_rrd = NULL; 2569 return (err); 2570 } 2571 if (ra->next_rrd->header.drr_type == DRR_BEGIN) { 2572 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2573 ra->next_rrd = NULL; 2574 return (SET_ERROR(EINVAL)); 2575 } 2576 2577 /* 2578 * Note: checksum is of everything up to but not including the 2579 * checksum itself. 2580 */ 2581 ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2582 ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t)); 2583 receive_cksum(ra, 2584 offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), 2585 &ra->next_rrd->header); 2586 2587 zio_cksum_t cksum_orig = 2588 ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2589 zio_cksum_t *cksump = 2590 &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum; 2591 2592 if (ra->byteswap) 2593 byteswap_record(&ra->next_rrd->header); 2594 2595 if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) && 2596 !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) { 2597 kmem_free(ra->next_rrd, sizeof (*ra->next_rrd)); 2598 ra->next_rrd = NULL; 2599 return (SET_ERROR(ECKSUM)); 2600 } 2601 2602 receive_cksum(ra, sizeof (cksum_orig), &cksum_orig); 2603 2604 return (0); 2605} 2606 2607static void 2608objlist_create(struct objlist *list) 2609{ 2610 list_create(&list->list, sizeof (struct receive_objnode), 2611 offsetof(struct receive_objnode, node)); 2612 list->last_lookup = 0; 2613} 2614 2615static void 2616objlist_destroy(struct objlist *list) 2617{ 2618 for (struct receive_objnode *n = list_remove_head(&list->list); 2619 n != NULL; n = list_remove_head(&list->list)) { 2620 kmem_free(n, sizeof (*n)); 2621 } 2622 list_destroy(&list->list); 2623} 2624 2625/* 2626 * This function looks through the objlist to see if the specified object number 2627 * is contained in the objlist. In the process, it will remove all object 2628 * numbers in the list that are smaller than the specified object number. 
Thus,
2629 * any lookup of an object number smaller than a previously looked up object
2630 * number will always return false; therefore, all lookups should be done in
2631 * ascending order.
2632 */
2633 static boolean_t
2634 objlist_exists(struct objlist *list, uint64_t object)
2635 {
2636 struct receive_objnode *node = list_head(&list->list);
2637 ASSERT3U(object, >=, list->last_lookup);
2638 list->last_lookup = object;
2639 while (node != NULL && node->object < object) {
2640 VERIFY3P(node, ==, list_remove_head(&list->list));
2641 kmem_free(node, sizeof (*node));
2642 node = list_head(&list->list);
2643 }
2644 return (node != NULL && node->object == object);
2645 }
2646
2647 /*
2648 * The objlist is a list of object numbers stored in ascending order. However,
2649 * objlist_insert() does not search out the correct location for a new object
2650 * number; for simplicity, it simply appends it to the tail of the list.
2651 * Callers must therefore take care to insert object numbers in ascending
2652 * order only (the ZFS_DEBUG build asserts this).
2653 */
2654 static void
2655 objlist_insert(struct objlist *list, uint64_t object)
2656 {
2657 struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
2658 node->object = object;
2659 #ifdef ZFS_DEBUG
2660 struct receive_objnode *last_object = list_tail(&list->list);
2661 uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
2662 ASSERT3U(node->object, >, last_objnum);
2663 #endif
2664 list_insert_tail(&list->list, node);
2665 }
2666
2667 /*
2668 * Issue the prefetch reads for any necessary indirect blocks.
2669 *
2670 * We use the object ignore list to tell us whether or not to issue prefetches
2671 * for a given object. We do this for both correctness (in case the blocksize
2672 * of an object has changed) and performance (if the object doesn't exist, don't
2673 * needlessly try to issue prefetches). We also trim the list as we go through
2674 * the stream to prevent it from growing to an unbounded size.
2675 *
2676 * The object numbers within will always be in sorted order, and any write
2677 * records we see will also be in sorted order, but they're not sorted with
2678 * respect to each other (i.e. we can get several object records before
2679 * receiving each object's write records). As a result, once we've reached a
2680 * given object number, we can safely remove any reference to lower object
2681 * numbers in the ignore list. In practice, we receive up to 32 object records
2682 * before receiving write records, so the list can have up to 32 nodes in it.
2683 */
2684 /* ARGSUSED */
2685 static void
2686 receive_read_prefetch(struct receive_arg *ra,
2687 uint64_t object, uint64_t offset, uint64_t length)
2688 {
2689 if (!objlist_exists(&ra->ignore_objlist, object)) {
2690 dmu_prefetch(ra->os, object, 1, offset, length,
2691 ZIO_PRIORITY_SYNC_READ);
2692 }
2693 }
2694
2695 /*
2696 * Read records off the stream, issuing any necessary prefetches.
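 * Each call consumes the current record's payload and reads the header
 * of the record after it (see receive_read_payload_and_next_header()).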
2697 */
2698 static int
2699 receive_read_record(struct receive_arg *ra)
2700 {
2701 int err;
2702
2703 switch (ra->rrd->header.drr_type) {
2704 case DRR_OBJECT:
2705 {
2706 struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2707 uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2708 void *buf = kmem_zalloc(size, KM_SLEEP);
2709 dmu_object_info_t doi;
2710 err = receive_read_payload_and_next_header(ra, size, buf);
2711 if (err != 0) {
2712 kmem_free(buf, size);
2713 return (err);
2714 }
2715 err = dmu_object_info(ra->os, drro->drr_object, &doi);
2716 /*
2717 * See receive_read_prefetch() for an explanation of why we're
2718 * storing this object in the ignore_objlist.
2719 */
2720 if (err == ENOENT ||
2721 (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2722 objlist_insert(&ra->ignore_objlist, drro->drr_object);
2723 err = 0;
2724 }
2725 return (err);
2726 }
2727 case DRR_FREEOBJECTS:
2728 {
2729 err = receive_read_payload_and_next_header(ra, 0, NULL);
2730 return (err);
2731 }
2732 case DRR_WRITE:
2733 {
2734 struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2735 arc_buf_t *abuf;
2736 boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
2737 if (DRR_WRITE_COMPRESSED(drrw)) {
2738 ASSERT3U(drrw->drr_compressed_size, >, 0);
2739 ASSERT3U(drrw->drr_logical_size, >=,
2740 drrw->drr_compressed_size);
2741 ASSERT(!is_meta);
2742 abuf = arc_loan_compressed_buf(
2743 dmu_objset_spa(ra->os),
2744 drrw->drr_compressed_size, drrw->drr_logical_size,
2745 drrw->drr_compressiontype);
2746 } else {
2747 abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2748 is_meta, drrw->drr_logical_size);
2749 }
2750
2751 err = receive_read_payload_and_next_header(ra,
2752 DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
2753 if (err != 0) {
2754 dmu_return_arcbuf(abuf);
2755 return (err);
2756 }
2757 ra->rrd->write_buf = abuf;
2758 receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2759 drrw->drr_logical_size);
2760 return (err);
2761 }
2762 case DRR_WRITE_BYREF:
2763 {
2764 struct drr_write_byref *drrwb =
2765 &ra->rrd->header.drr_u.drr_write_byref;
2766 err = receive_read_payload_and_next_header(ra, 0, NULL);
2767 receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2768 drrwb->drr_length);
2769 return (err);
2770 }
2771 case DRR_WRITE_EMBEDDED:
2772 {
2773 struct drr_write_embedded *drrwe =
2774 &ra->rrd->header.drr_u.drr_write_embedded;
2775 uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2776 void *buf = kmem_zalloc(size, KM_SLEEP);
2777
2778 err = receive_read_payload_and_next_header(ra, size, buf);
2779 if (err != 0) {
2780 kmem_free(buf, size);
2781 return (err);
2782 }
2783
2784 receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2785 drrwe->drr_length);
2786 return (err);
2787 }
2788 case DRR_FREE:
2789 {
2790 /*
2791 * It might be beneficial to prefetch indirect blocks here, but
2792 * we don't really have the data to decide for sure.
2793 */ 2794 err = receive_read_payload_and_next_header(ra, 0, NULL); 2795 return (err); 2796 } 2797 case DRR_END: 2798 { 2799 struct drr_end *drre = &ra->rrd->header.drr_u.drr_end; 2800 if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum)) 2801 return (SET_ERROR(ECKSUM)); 2802 return (0); 2803 } 2804 case DRR_SPILL: 2805 { 2806 struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill; 2807 void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP); 2808 err = receive_read_payload_and_next_header(ra, drrs->drr_length, 2809 buf); 2810 if (err != 0) 2811 kmem_free(buf, drrs->drr_length); 2812 return (err); 2813 } 2814 default: 2815 return (SET_ERROR(EINVAL)); 2816 } 2817} 2818 2819/* 2820 * Commit the records to the pool. 2821 */ 2822static int 2823receive_process_record(struct receive_writer_arg *rwa, 2824 struct receive_record_arg *rrd) 2825{ 2826 int err; 2827 2828 /* Processing in order, therefore bytes_read should be increasing. */ 2829 ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read); 2830 rwa->bytes_read = rrd->bytes_read; 2831 2832 switch (rrd->header.drr_type) { 2833 case DRR_OBJECT: 2834 { 2835 struct drr_object *drro = &rrd->header.drr_u.drr_object; 2836 err = receive_object(rwa, drro, rrd->payload); 2837 kmem_free(rrd->payload, rrd->payload_size); 2838 rrd->payload = NULL; 2839 return (err); 2840 } 2841 case DRR_FREEOBJECTS: 2842 { 2843 struct drr_freeobjects *drrfo = 2844 &rrd->header.drr_u.drr_freeobjects; 2845 return (receive_freeobjects(rwa, drrfo)); 2846 } 2847 case DRR_WRITE: 2848 { 2849 struct drr_write *drrw = &rrd->header.drr_u.drr_write; 2850 err = receive_write(rwa, drrw, rrd->write_buf); 2851 /* if receive_write() is successful, it consumes the arc_buf */ 2852 if (err != 0) 2853 dmu_return_arcbuf(rrd->write_buf); 2854 rrd->write_buf = NULL; 2855 rrd->payload = NULL; 2856 return (err); 2857 } 2858 case DRR_WRITE_BYREF: 2859 { 2860 struct drr_write_byref *drrwbr = 2861 &rrd->header.drr_u.drr_write_byref; 2862 return (receive_write_byref(rwa, drrwbr)); 2863 } 2864 case DRR_WRITE_EMBEDDED: 2865 { 2866 struct drr_write_embedded *drrwe = 2867 &rrd->header.drr_u.drr_write_embedded; 2868 err = receive_write_embedded(rwa, drrwe, rrd->payload); 2869 kmem_free(rrd->payload, rrd->payload_size); 2870 rrd->payload = NULL; 2871 return (err); 2872 } 2873 case DRR_FREE: 2874 { 2875 struct drr_free *drrf = &rrd->header.drr_u.drr_free; 2876 return (receive_free(rwa, drrf)); 2877 } 2878 case DRR_SPILL: 2879 { 2880 struct drr_spill *drrs = &rrd->header.drr_u.drr_spill; 2881 err = receive_spill(rwa, drrs, rrd->payload); 2882 kmem_free(rrd->payload, rrd->payload_size); 2883 rrd->payload = NULL; 2884 return (err); 2885 } 2886 default: 2887 return (SET_ERROR(EINVAL)); 2888 } 2889} 2890 2891/* 2892 * dmu_recv_stream's worker thread; pull records off the queue, and then call 2893 * receive_process_record When we're done, signal the main thread and exit. 2894 */ 2895static void 2896receive_writer_thread(void *arg) 2897{ 2898 struct receive_writer_arg *rwa = arg; 2899 struct receive_record_arg *rrd; 2900 for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker; 2901 rrd = bqueue_dequeue(&rwa->q)) { 2902 /* 2903 * If there's an error, the main thread will stop putting things 2904 * on the queue, but we need to clear everything in it before we 2905 * can exit. 
2906 */ 2907 if (rwa->err == 0) { 2908 rwa->err = receive_process_record(rwa, rrd); 2909 } else if (rrd->write_buf != NULL) { 2910 dmu_return_arcbuf(rrd->write_buf); 2911 rrd->write_buf = NULL; 2912 rrd->payload = NULL; 2913 } else if (rrd->payload != NULL) { 2914 kmem_free(rrd->payload, rrd->payload_size); 2915 rrd->payload = NULL; 2916 } 2917 kmem_free(rrd, sizeof (*rrd)); 2918 } 2919 kmem_free(rrd, sizeof (*rrd)); 2920 mutex_enter(&rwa->mutex); 2921 rwa->done = B_TRUE; 2922 cv_signal(&rwa->cv); 2923 mutex_exit(&rwa->mutex); 2924 thread_exit(); 2925} 2926 2927static int 2928resume_check(struct receive_arg *ra, nvlist_t *begin_nvl) 2929{ 2930 uint64_t val; 2931 objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset; 2932 uint64_t dsobj = dmu_objset_id(ra->os); 2933 uint64_t resume_obj, resume_off; 2934 2935 if (nvlist_lookup_uint64(begin_nvl, 2936 "resume_object", &resume_obj) != 0 || 2937 nvlist_lookup_uint64(begin_nvl, 2938 "resume_offset", &resume_off) != 0) { 2939 return (SET_ERROR(EINVAL)); 2940 } 2941 VERIFY0(zap_lookup(mos, dsobj, 2942 DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val)); 2943 if (resume_obj != val) 2944 return (SET_ERROR(EINVAL)); 2945 VERIFY0(zap_lookup(mos, dsobj, 2946 DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val)); 2947 if (resume_off != val) 2948 return (SET_ERROR(EINVAL)); 2949 2950 return (0); 2951} 2952 2953/* 2954 * Read in the stream's records, one by one, and apply them to the pool. There 2955 * are two threads involved; the thread that calls this function will spin up a 2956 * worker thread, read the records off the stream one by one, and issue 2957 * prefetches for any necessary indirect blocks. It will then push the records 2958 * onto an internal blocking queue. The worker thread will pull the records off 2959 * the queue, and actually write the data into the DMU. This way, the worker 2960 * thread doesn't have to wait for reads to complete, since everything it needs 2961 * (the indirect blocks) will be prefetched. 2962 * 2963 * NB: callers *must* call dmu_recv_end() if this succeeds. 2964 */ 2965int 2966dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp, 2967 int cleanup_fd, uint64_t *action_handlep) 2968{ 2969 int err = 0; 2970 struct receive_arg ra = { 0 }; 2971 struct receive_writer_arg rwa = { 0 }; 2972 int featureflags; 2973 nvlist_t *begin_nvl = NULL; 2974 2975 ra.byteswap = drc->drc_byteswap; 2976 ra.cksum = drc->drc_cksum; 2977 ra.td = curthread; 2978 ra.fp = fp; 2979 ra.voff = *voffp; 2980 2981 if (dsl_dataset_is_zapified(drc->drc_ds)) { 2982 (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset, 2983 drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES, 2984 sizeof (ra.bytes_read), 1, &ra.bytes_read); 2985 } 2986 2987 objlist_create(&ra.ignore_objlist); 2988 2989 /* these were verified in dmu_recv_begin */ 2990 ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==, 2991 DMU_SUBSTREAM); 2992 ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES); 2993 2994 /* 2995 * Open the objset we are modifying. 
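 * (It was created, or re-marked inconsistent, by dmu_recv_begin_sync()
 * or dmu_recv_resume_begin_sync(); the ASSERT below relies on that.)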
2996 */
2997 VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2998
2999 ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
3000
3001 featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
3002
3003 /* if this stream is dedup'ed, set up the avl tree for guid mapping */
3004 if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
3005 minor_t minor;
3006
3007 if (cleanup_fd == -1) {
3008 err = SET_ERROR(EBADF);
3009 goto out;
3010 }
3011 err = zfs_onexit_fd_hold(cleanup_fd, &minor);
3012 if (err != 0) {
3013 cleanup_fd = -1;
3014 goto out;
3015 }
3016
3017 if (*action_handlep == 0) {
3018 rwa.guid_to_ds_map =
3019 kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
3020 avl_create(rwa.guid_to_ds_map, guid_compare,
3021 sizeof (guid_map_entry_t),
3022 offsetof(guid_map_entry_t, avlnode));
3023 err = zfs_onexit_add_cb(minor,
3024 free_guid_map_onexit, rwa.guid_to_ds_map,
3025 action_handlep);
3026 if (err != 0)
3027 goto out;
3028 } else {
3029 err = zfs_onexit_cb_data(minor, *action_handlep,
3030 (void **)&rwa.guid_to_ds_map);
3031 if (err != 0)
3032 goto out;
3033 }
3034
3035 drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
3036 }
3037
3038 uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
3039 void *payload = NULL;
3040 if (payloadlen != 0)
3041 payload = kmem_alloc(payloadlen, KM_SLEEP);
3042
3043 err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
3044 if (err != 0) {
3045 if (payloadlen != 0)
3046 kmem_free(payload, payloadlen);
3047 goto out;
3048 }
3049 if (payloadlen != 0) {
3050 err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
3051 kmem_free(payload, payloadlen);
3052 if (err != 0)
3053 goto out;
3054 }
3055
3056 if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
3057 err = resume_check(&ra, begin_nvl);
3058 if (err != 0)
3059 goto out;
3060 }
3061
3062 (void) bqueue_init(&rwa.q, zfs_recv_queue_length,
3063 offsetof(struct receive_record_arg, node));
3064 cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
3065 mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
3066 rwa.os = ra.os;
3067 rwa.byteswap = drc->drc_byteswap;
3068 rwa.resumable = drc->drc_resumable;
3069
3070 (void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0,
3071 TS_RUN, minclsyspri);
3072 /*
3073 * We're reading rwa.err without locks, which is safe since we are the
3074 * only reader, and the worker thread is the only writer. It's ok if we
3075 * miss a write for an iteration or two of the loop, since the writer
3076 * thread will keep freeing records we send it until we send it an eos
3077 * marker.
3078 *
3079 * We can leave this loop in 3 ways: First, if rwa.err is
3080 * non-zero. In that case, the writer thread will free the rrd we just
3081 * pushed. Second, if we're interrupted; in that case, either it's the
3082 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
3083 * has been handed off to the writer thread who will free it. Finally,
3084 * if receive_read_record fails or we're at the end of the stream, then
3085 * we free ra.rrd and exit.
3086 */ 3087 while (rwa.err == 0) { 3088 if (issig(JUSTLOOKING) && issig(FORREAL)) { 3089 err = SET_ERROR(EINTR); 3090 break; 3091 } 3092 3093 ASSERT3P(ra.rrd, ==, NULL); 3094 ra.rrd = ra.next_rrd; 3095 ra.next_rrd = NULL; 3096 /* Allocates and loads header into ra.next_rrd */ 3097 err = receive_read_record(&ra); 3098 3099 if (ra.rrd->header.drr_type == DRR_END || err != 0) { 3100 kmem_free(ra.rrd, sizeof (*ra.rrd)); 3101 ra.rrd = NULL; 3102 break; 3103 } 3104 3105 bqueue_enqueue(&rwa.q, ra.rrd, 3106 sizeof (struct receive_record_arg) + ra.rrd->payload_size); 3107 ra.rrd = NULL; 3108 } 3109 if (ra.next_rrd == NULL) 3110 ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP); 3111 ra.next_rrd->eos_marker = B_TRUE; 3112 bqueue_enqueue(&rwa.q, ra.next_rrd, 1); 3113 3114 mutex_enter(&rwa.mutex); 3115 while (!rwa.done) { 3116 cv_wait(&rwa.cv, &rwa.mutex); 3117 } 3118 mutex_exit(&rwa.mutex); 3119 3120 /* 3121 * If we are receiving a full stream as a clone, all object IDs which 3122 * are greater than the maximum ID referenced in the stream are 3123 * by definition unused and must be freed. Note that it's possible that 3124 * we've resumed this send and the first record we received was the END 3125 * record. In that case, max_object would be 0, but we shouldn't start 3126 * freeing all objects from there; instead we should start from the 3127 * resumeobj. 3128 */ 3129 if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) { 3130 uint64_t obj; 3131 if (nvlist_lookup_uint64(begin_nvl, "resume_object", &obj) != 0) 3132 obj = 0; 3133 if (rwa.max_object > obj) 3134 obj = rwa.max_object; 3135 obj++; 3136 int free_err = 0; 3137 int next_err = 0; 3138 3139 while (next_err == 0) { 3140 free_err = dmu_free_long_object(rwa.os, obj); 3141 if (free_err != 0 && free_err != ENOENT) 3142 break; 3143 3144 next_err = dmu_object_next(rwa.os, &obj, FALSE, 0); 3145 } 3146 3147 if (err == 0) { 3148 if (free_err != 0 && free_err != ENOENT) 3149 err = free_err; 3150 else if (next_err != ESRCH) 3151 err = next_err; 3152 } 3153 } 3154 3155 cv_destroy(&rwa.cv); 3156 mutex_destroy(&rwa.mutex); 3157 bqueue_destroy(&rwa.q); 3158 if (err == 0) 3159 err = rwa.err; 3160 3161out: 3162 nvlist_free(begin_nvl); 3163 if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1)) 3164 zfs_onexit_fd_rele(cleanup_fd); 3165 3166 if (err != 0) { 3167 /* 3168 * Clean up references. If receive is not resumable, 3169 * destroy what we created, so we don't leave it in 3170 * the inconsistent state. 3171 */ 3172 dmu_recv_cleanup_ds(drc); 3173 } 3174 3175 *voffp = ra.voff; 3176 objlist_destroy(&ra.ignore_objlist); 3177 return (err); 3178} 3179 3180static int 3181dmu_recv_end_check(void *arg, dmu_tx_t *tx) 3182{ 3183 dmu_recv_cookie_t *drc = arg; 3184 dsl_pool_t *dp = dmu_tx_pool(tx); 3185 int error; 3186 3187 ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag); 3188 3189 if (!drc->drc_newfs) { 3190 dsl_dataset_t *origin_head; 3191 3192 error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head); 3193 if (error != 0) 3194 return (error); 3195 if (drc->drc_force) { 3196 /* 3197 * We will destroy any snapshots in tofs (i.e. before 3198 * origin_head) that are after the origin (which is 3199 * the snap before drc_ds, because drc_ds can not 3200 * have any snaps of its own). 
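 * For example, if origin_head has snapshots @a, @b, and @c (oldest to
 * newest) and this stream's origin is @a, then @b and @c are checked
 * here and later destroyed in dmu_recv_end_sync().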
3201 */ 3202 uint64_t obj; 3203 3204 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3205 while (obj != 3206 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3207 dsl_dataset_t *snap; 3208 error = dsl_dataset_hold_obj(dp, obj, FTAG, 3209 &snap); 3210 if (error != 0) 3211 break; 3212 if (snap->ds_dir != origin_head->ds_dir) 3213 error = SET_ERROR(EINVAL); 3214 if (error == 0) { 3215 error = dsl_destroy_snapshot_check_impl( 3216 snap, B_FALSE); 3217 } 3218 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3219 dsl_dataset_rele(snap, FTAG); 3220 if (error != 0) 3221 break; 3222 } 3223 if (error != 0) { 3224 dsl_dataset_rele(origin_head, FTAG); 3225 return (error); 3226 } 3227 } 3228 error = dsl_dataset_clone_swap_check_impl(drc->drc_ds, 3229 origin_head, drc->drc_force, drc->drc_owner, tx); 3230 if (error != 0) { 3231 dsl_dataset_rele(origin_head, FTAG); 3232 return (error); 3233 } 3234 error = dsl_dataset_snapshot_check_impl(origin_head, 3235 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 3236 dsl_dataset_rele(origin_head, FTAG); 3237 if (error != 0) 3238 return (error); 3239 3240 error = dsl_destroy_head_check_impl(drc->drc_ds, 1); 3241 } else { 3242 error = dsl_dataset_snapshot_check_impl(drc->drc_ds, 3243 drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred); 3244 } 3245 return (error); 3246} 3247 3248static void 3249dmu_recv_end_sync(void *arg, dmu_tx_t *tx) 3250{ 3251 dmu_recv_cookie_t *drc = arg; 3252 dsl_pool_t *dp = dmu_tx_pool(tx); 3253 3254 spa_history_log_internal_ds(drc->drc_ds, "finish receiving", 3255 tx, "snap=%s", drc->drc_tosnap); 3256 3257 if (!drc->drc_newfs) { 3258 dsl_dataset_t *origin_head; 3259 3260 VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG, 3261 &origin_head)); 3262 3263 if (drc->drc_force) { 3264 /* 3265 * Destroy any snapshots of drc_tofs (origin_head) 3266 * after the origin (the snap before drc_ds). 
3267 */ 3268 uint64_t obj; 3269 3270 obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3271 while (obj != 3272 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) { 3273 dsl_dataset_t *snap; 3274 VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, 3275 &snap)); 3276 ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir); 3277 obj = dsl_dataset_phys(snap)->ds_prev_snap_obj; 3278 dsl_destroy_snapshot_sync_impl(snap, 3279 B_FALSE, tx); 3280 dsl_dataset_rele(snap, FTAG); 3281 } 3282 } 3283 VERIFY3P(drc->drc_ds->ds_prev, ==, 3284 origin_head->ds_prev); 3285 3286 dsl_dataset_clone_swap_sync_impl(drc->drc_ds, 3287 origin_head, tx); 3288 dsl_dataset_snapshot_sync_impl(origin_head, 3289 drc->drc_tosnap, tx); 3290 3291 /* set snapshot's creation time and guid */ 3292 dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx); 3293 dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time = 3294 drc->drc_drrb->drr_creation_time; 3295 dsl_dataset_phys(origin_head->ds_prev)->ds_guid = 3296 drc->drc_drrb->drr_toguid; 3297 dsl_dataset_phys(origin_head->ds_prev)->ds_flags &= 3298 ~DS_FLAG_INCONSISTENT; 3299 3300 dmu_buf_will_dirty(origin_head->ds_dbuf, tx); 3301 dsl_dataset_phys(origin_head)->ds_flags &= 3302 ~DS_FLAG_INCONSISTENT; 3303 3304 drc->drc_newsnapobj = 3305 dsl_dataset_phys(origin_head)->ds_prev_snap_obj; 3306 3307 dsl_dataset_rele(origin_head, FTAG); 3308 dsl_destroy_head_sync_impl(drc->drc_ds, tx); 3309 3310 if (drc->drc_owner != NULL) 3311 VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner); 3312 } else { 3313 dsl_dataset_t *ds = drc->drc_ds; 3314 3315 dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx); 3316 3317 /* set snapshot's creation time and guid */ 3318 dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx); 3319 dsl_dataset_phys(ds->ds_prev)->ds_creation_time = 3320 drc->drc_drrb->drr_creation_time; 3321 dsl_dataset_phys(ds->ds_prev)->ds_guid = 3322 drc->drc_drrb->drr_toguid; 3323 dsl_dataset_phys(ds->ds_prev)->ds_flags &= 3324 ~DS_FLAG_INCONSISTENT; 3325 3326 dmu_buf_will_dirty(ds->ds_dbuf, tx); 3327 dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT; 3328 if (dsl_dataset_has_resume_receive_state(ds)) { 3329 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3330 DS_FIELD_RESUME_FROMGUID, tx); 3331 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3332 DS_FIELD_RESUME_OBJECT, tx); 3333 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3334 DS_FIELD_RESUME_OFFSET, tx); 3335 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3336 DS_FIELD_RESUME_BYTES, tx); 3337 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3338 DS_FIELD_RESUME_TOGUID, tx); 3339 (void) zap_remove(dp->dp_meta_objset, ds->ds_object, 3340 DS_FIELD_RESUME_TONAME, tx); 3341 } 3342 drc->drc_newsnapobj = 3343 dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj; 3344 } 3345 /* 3346 * Release the hold from dmu_recv_begin. This must be done before 3347 * we return to open context, so that when we free the dataset's dnode, 3348 * we can evict its bonus buffer. 
3349 */ 3350 dsl_dataset_disown(drc->drc_ds, dmu_recv_tag); 3351 drc->drc_ds = NULL; 3352} 3353 3354static int 3355add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj) 3356{ 3357 dsl_pool_t *dp; 3358 dsl_dataset_t *snapds; 3359 guid_map_entry_t *gmep; 3360 int err; 3361 3362 ASSERT(guid_map != NULL); 3363 3364 err = dsl_pool_hold(name, FTAG, &dp); 3365 if (err != 0) 3366 return (err); 3367 gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP); 3368 err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds); 3369 if (err == 0) { 3370 gmep->guid = dsl_dataset_phys(snapds)->ds_guid; 3371 gmep->gme_ds = snapds; 3372 avl_add(guid_map, gmep); 3373 dsl_dataset_long_hold(snapds, gmep); 3374 } else 3375 kmem_free(gmep, sizeof (*gmep)); 3376 3377 dsl_pool_rele(dp, FTAG); 3378 return (err); 3379} 3380 3381static int dmu_recv_end_modified_blocks = 3; 3382 3383static int 3384dmu_recv_existing_end(dmu_recv_cookie_t *drc) 3385{ 3386#ifdef _KERNEL 3387 /* 3388 * We will be destroying the ds; make sure its origin is unmounted if 3389 * necessary. 3390 */ 3391 char name[ZFS_MAX_DATASET_NAME_LEN]; 3392 dsl_dataset_name(drc->drc_ds, name); 3393 zfs_destroy_unmount_origin(name); 3394#endif 3395 3396 return (dsl_sync_task(drc->drc_tofs, 3397 dmu_recv_end_check, dmu_recv_end_sync, drc, 3398 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); 3399} 3400 3401static int 3402dmu_recv_new_end(dmu_recv_cookie_t *drc) 3403{ 3404 return (dsl_sync_task(drc->drc_tofs, 3405 dmu_recv_end_check, dmu_recv_end_sync, drc, 3406 dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL)); 3407} 3408 3409int 3410dmu_recv_end(dmu_recv_cookie_t *drc, void *owner) 3411{ 3412 int error; 3413 3414 drc->drc_owner = owner; 3415 3416 if (drc->drc_newfs) 3417 error = dmu_recv_new_end(drc); 3418 else 3419 error = dmu_recv_existing_end(drc); 3420 3421 if (error != 0) { 3422 dmu_recv_cleanup_ds(drc); 3423 } else if (drc->drc_guid_to_ds_map != NULL) { 3424 (void) add_ds_to_guidmap(drc->drc_tofs, 3425 drc->drc_guid_to_ds_map, 3426 drc->drc_newsnapobj); 3427 } 3428 return (error); 3429} 3430 3431/* 3432 * Return TRUE if this objset is currently being received into. 3433 */ 3434boolean_t 3435dmu_objset_is_receiving(objset_t *os) 3436{ 3437 return (os->os_dsl_dataset != NULL && 3438 os->os_dsl_dataset->ds_owner == dmu_recv_tag); 3439} 3440