dmu_send.c revision 290757
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>

#ifdef __FreeBSD__
#undef dump_write
#define dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;

static char *dmu_recv_tag = "dmu_recv_tag";
const char *recv_clone_name = "%recv";

#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))
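
/*
 * Worked example (assuming 128KB data blocks, datablkszsec == 256, and
 * 16KB indirect blocks, indblkshift == 14): a level-0 block pointer spans
 * 256 << 9 == 128KB of data; each indirect block holds 2^(14 - 7) == 128
 * block pointers, so a level-1 pointer spans 256 << (9 + 7) == 16MB, a
 * level-2 pointer spans 2GB, and so on.
 */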

static void byteswap_record(dmu_replay_record_t *drr);

struct send_thread_arg {
	bqueue_t	q;
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	uint64_t	fromtxg;	/* Traverse from this txg */
	int		flags;		/* flags to pass to traverse_dataset */
	int		error_code;
	boolean_t	cancel;
	zbookmark_phys_t resume;
};

struct send_block_record {
	boolean_t		eos_marker; /* Marks the end of the stream */
	blkptr_t		bp;
	zbookmark_phys_t	zb;
	uint8_t			indblkshift;
	uint16_t		datablkszsec;
	bqueue_node_t		ln;
};

static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
	struct uio auio;
	struct iovec aiov;
	ASSERT0(len % 8);

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}
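
/*
 * Note that the stream checksum is cumulative: each record's embedded
 * drr_checksum covers everything sent before it.  A receiver verifies a
 * record by mirroring the computation above; a minimal sketch, assuming a
 * native-endian stream and a running zio_cksum_t zc initialized to zero:
 *
 *	fletcher_4_incremental_native(drr,
 *	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
 *	    &zc);
 *	if (drr->drr_type != DRR_BEGIN &&
 *	    !ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.drr_checksum.drr_checksum) &&
 *	    !ZIO_CHECKSUM_EQUAL(drr->drr_u.drr_checksum.drr_checksum, zc))
 *		return (SET_ERROR(ECKSUM));
 *	fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
 *	    sizeof (zio_cksum_t), &zc);
 */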

static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object,offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If we are doing a non-incremental send, then there can't
	 * be any data in the dataset we're receiving into.  Therefore
	 * a free record would simply be a no-op.  Save space by not
	 * sending it to begin with.
	 */
	if (!dsp->dsa_incremental)
		return (0);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}
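
/*
 * To illustrate the aggregation above: three adjacent 128KB frees of
 * object 5 coalesce into one pending DRR_FREE record covering 384KB,
 * which only hits the stream when a non-mergeable record follows:
 *
 *	dump_free(dsp, 5, 0x00000, 0x20000);	<- becomes PENDING_FREE
 *	dump_free(dsp, 5, 0x20000, 0x20000);	<- extends drr_length
 *	dump_free(dsp, 5, 0x40000, 0x20000);	<- extends drr_length
 *	dump_write(dsp, ...);			<- pushes out the 384KB free
 */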

static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
		    ZCHECKSUM_FLAG_DEDUP)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (EINTR);
	return (0);
}

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/* See comment in dump_free(). */
	if (!dsp->dsa_incremental)
		return (0);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (object < dsp->dsa_resume_object) {
		/*
		 * Note: when resuming, we will visit all the dnodes in
		 * the block of dnodes that we are resuming from.  In
		 * this case it's unnecessary to send the dnodes prior to
		 * the one we are resuming from.  We should be at most one
		 * block's worth of dnodes behind the resume point.
		 */
		ASSERT3U(dsp->dsa_resume_object - object, <,
		    1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
		return (0);
	}

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This is the callback function to traverse_dataset; it is invoked from
 * the worker thread that dmu_send_impl spawns.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= sta->resume.zb_object);

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}

/*
 * This function kicks off the traverse_dataset.  It also handles setting the
 * error code of the thread in case something goes wrong, and pushes the End of
 * Stream record when the traverse_dataset call has finished.  If there is no
 * dataset to traverse, the thread pushes the End of Stream marker immediately.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset_resume(st_arg->ds,
		    st_arg->fromtxg, &st_arg->resume,
		    st_arg->flags, send_cb, st_arg);

		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	thread_exit();
}
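
/*
 * The main thread consumes these records with the usual bqueue pattern
 * (this is a sketch of the loop in dmu_send_impl() below):
 *
 *	struct send_block_record *data = bqueue_dequeue(&to_arg.q);
 *	while (!data->eos_marker && err == 0) {
 *		err = do_dump(dsp, data);
 *		data = get_next_record(&to_arg.q, data);
 *	}
 *
 * Because the End of Stream record is enqueued even when the traversal
 * fails, the consumer never blocks forever on an empty queue.
 */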

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
	    zb->zb_object >= dsa->dsa_resume_object);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		ASSERT0(zb->zb_level);
		ASSERT(zb->zb_object > dsa->dsa_resume_object ||
		    (zb->zb_object == dsa->dsa_resume_object &&
		    zb->zb_blkid * blksz >= dsa->dsa_resume_offset));

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (!(dsa->dsa_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
		    blksz > SPA_OLD_MAXBLOCKSIZE) {
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object,
			    offset, blksz, bp, abuf->b_data);
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}

/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb,
    boolean_t is_clone, boolean_t embedok, boolean_t large_block_ok, int outfd,
    uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg = { 0 };

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	}

	if (resumeobj != 0 || resumeoff != 0) {
		featureflags |= DMU_BACKUP_FEATURE_RESUMING;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_incremental = (ancestor_zb != NULL);
	dsp->dsa_featureflags = featureflags;
	dsp->dsa_resume_object = resumeobj;
	dsp->dsa_resume_offset = resumeoff;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	void *payload = NULL;
	size_t payload_len = 0;
	if (resumeobj != 0 || resumeoff != 0) {
		dmu_object_info_t to_doi;
		err = dmu_object_info(os, resumeobj, &to_doi);
		if (err != 0)
			goto out;
		SET_BOOKMARK(&to_arg.resume, to_ds->ds_object, resumeobj, 0,
		    resumeoff / to_doi.doi_data_block_size);

		nvlist_t *nvl = fnvlist_alloc();
		fnvlist_add_uint64(nvl, "resume_object", resumeobj);
		fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
		payload = fnvlist_pack(nvl, &payload_len);
		drr->drr_payloadlen = payload_len;
		fnvlist_free(nvl);
	}

	err = dump_record(dsp, payload, payload_len);
	fnvlist_pack_free(payload, payload_len);
	if (err != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, &p0,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}

int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, 0, 0, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, 0, 0, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
    boolean_t large_block_ok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
#ifdef illumos
    vnode_t *vp, offset_t *off)
#else
    struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok,
		    outfd, resumeobj, resumeoff, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok,
		    outfd, resumeobj, resumeoff, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}

static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
    uint64_t *sizep)
{
	int err;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation cancel out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}
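
/*
 * Worked example: with recordsize=128K, a size of 1GB implies 8192 data
 * blocks.  We subtract 8192 * sizeof (blkptr_t) (128 bytes) == 1MB of
 * indirect-block overhead, then add 8192 * sizeof (dmu_replay_record_t)
 * of stream-record overhead, so the estimate lands slightly above the
 * logical data size.
 */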

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed size
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *spaceptr = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		*spaceptr += BP_GET_UCSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG. from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size = 0;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	if (drba->drba_cookie->drc_resumable &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	objset_t *mos = dp->dp_meta_objset;
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags = 0;

	if (drrb->drr_flags & DRR_FLAG_CI_DATA)
		crflags |= DS_FLAG_CI_DATASET;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if (drba->drba_cookie->drc_resumable) {
		dsl_dataset_zapify(newds, tx);
		if (drrb->drr_fromguid != 0) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
			    8, 1, &drrb->drr_fromguid, tx));
		}
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
		    8, 1, &drrb->drr_toguid, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
		    1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
		uint64_t one = 1;
		uint64_t zero = 0;
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
		    8, 1, &one, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
		    8, 1, &zero, tx));
		VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
		    8, 1, &zero, tx));
		if (DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
		    DMU_BACKUP_FEATURE_EMBED_DATA) {
			VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
			    8, 1, &one, tx));
		}
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

static int
dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
	ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES)
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	char recvname[ZFS_MAXNAMELEN];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
		if (error != 0)
			return (error);
	}

	/* check that ds is marked inconsistent */
	if (!DS_IS_INCONSISTENT(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/* check that there is resuming data, and that the toguid matches */
	if (!dsl_dataset_is_zapified(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}
	uint64_t val;
	error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
	if (error != 0 || drrb->drr_toguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Check if the receive is still running.  If so, it will be owned.
	 * Note that nothing else can own the dataset (e.g. after the receive
	 * fails) because it will be marked inconsistent.
	 */
	if (dsl_dataset_has_owner(ds)) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EBUSY));
	}

	/* There should not be any snapshots of this fs yet. */
	if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Note: resume point will be checked when we process the first WRITE
	 * record.
	 */

	/* check that the origin matches */
	val = 0;
	(void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
	    DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
	if (drrb->drr_fromguid != val) {
		dsl_dataset_rele(ds, FTAG);
		return (SET_ERROR(EINVAL));
	}

	dsl_dataset_rele(ds, FTAG);
	return (0);
}

static void
dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds;
	uint64_t dsobj;
	char recvname[ZFS_MAXNAMELEN];

	(void) snprintf(recvname, sizeof (recvname), "%s/%s",
	    tofs, recv_clone_name);

	if (dsl_dataset_hold(dp, recvname, FTAG, &ds) != 0) {
		/* %recv does not exist; continue in tofs */
		VERIFY0(dsl_dataset_hold(dp, tofs, FTAG, &ds));
		drba->drba_cookie->drc_newfs = B_TRUE;
	}

	/* clear the inconsistent flag so that we can own it */
	ASSERT(DS_IS_INCONSISTENT(ds));
	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	dsobj = ds->ds_object;
	dsl_dataset_rele(ds, FTAG);

	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &ds));

	dmu_buf_will_dirty(ds->ds_dbuf, tx);
	dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;

	ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)));

	drba->drba_cookie->drc_ds = ds;

	spa_history_log_internal_ds(ds, "resume receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
    boolean_t force, boolean_t resumable, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drr_begin = drr_begin;
	drc->drc_drrb = &drr_begin->drr_u.drr_begin;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_resumable = resumable;
	drc->drc_cred = CRED();

	if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
		drc->drc_byteswap = B_TRUE;
		fletcher_4_incremental_byteswap(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
		byteswap_record(drr_begin);
	} else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
		fletcher_4_incremental_native(drr_begin,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		return (SET_ERROR(EINVAL));
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_RESUMING) {
		return (dsl_sync_task(tofs,
		    dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	} else {
		return (dsl_sync_task(tofs,
		    dmu_recv_begin_check, dmu_recv_begin_sync,
		    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
	}
}

struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	uint64_t bytes_read; /* bytes read from stream when record created */
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;

	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;

	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
	boolean_t resumable;
	uint64_t last_object, last_offset;
	uint64_t bytes_read; /* bytes read when current record created */
};

struct receive_arg {
	objset_t *os;
	kthread_t *td;
	struct file *fp;
	uint64_t voff; /* The current offset in the stream */
	uint64_t bytes_read;
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	list_t ignore_obj_list;
};

struct receive_ign_obj_node {
	list_node_t node;
	uint64_t object;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}
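
/*
 * A guid-to-dataset map ordered by this comparator would be set up as in
 * this sketch (mirroring how the receive path creates the map for
 * dedup'd streams):
 *
 *	avl_tree_t *ca = kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
 *	avl_create(ca, guid_compare, sizeof (guid_map_entry_t),
 *	    offsetof(guid_map_entry_t, avlnode));
 */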
1756
1757static void
1758free_guid_map_onexit(void *arg)
1759{
1760	avl_tree_t *ca = arg;
1761	void *cookie = NULL;
1762	guid_map_entry_t *gmep;
1763
1764	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
1765		dsl_dataset_long_rele(gmep->gme_ds, gmep);
1766		dsl_dataset_rele(gmep->gme_ds, gmep);
1767		kmem_free(gmep, sizeof (guid_map_entry_t));
1768	}
1769	avl_destroy(ca);
1770	kmem_free(ca, sizeof (avl_tree_t));
1771}
1772
1773static int
1774restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off, ssize_t *resid)
1775{
1776	struct uio auio;
1777	struct iovec aiov;
1778	int error;
1779
1780	aiov.iov_base = buf;
1781	aiov.iov_len = len;
1782	auio.uio_iov = &aiov;
1783	auio.uio_iovcnt = 1;
1784	auio.uio_resid = len;
1785	auio.uio_segflg = UIO_SYSSPACE;
1786	auio.uio_rw = UIO_READ;
1787	auio.uio_offset = off;
1788	auio.uio_td = ra->td;
1789#ifdef _KERNEL
1790	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
1791#else
1792	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
1793	error = EOPNOTSUPP;
1794#endif
1795	*resid = auio.uio_resid;
1796	return (error);
1797}
1798
1799static int
1800receive_read(struct receive_arg *ra, int len, void *buf)
1801{
1802	int done = 0;
1803
	/* Some records need 8-byte alignment, so every read must be too. */
1805	ASSERT0(len % 8);
1806
1807	while (done < len) {
1808		ssize_t resid;
1809
1810		ra->err = restore_bytes(ra, buf + done,
1811		    len - done, ra->voff, &resid);
1812
1813		if (resid == len - done) {
1814			/*
1815			 * Note: ECKSUM indicates that the receive
1816			 * was interrupted and can potentially be resumed.
1817			 */
1818			ra->err = SET_ERROR(ECKSUM);
1819		}
1820		ra->voff += len - done - resid;
1821		done = len - resid;
1822		if (ra->err != 0)
1823			return (ra->err);
1824	}
1825
1826	ra->bytes_read += len;
1827
1828	ASSERT3U(done, ==, len);
1829	return (0);
1830}
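/*
 * Illustration of the loop above with invented byte counts: a call with
 * len == 16 that is answered by two partial reads proceeds as
 *
 *	restore_bytes(..., 16, ...) -> resid == 8	(8 bytes arrived)
 *	restore_bytes(...,  8, ...) -> resid == 8	(no progress: EOF)
 *
 * The second call leaves resid == len - done, so the truncated record
 * is reported as ECKSUM, the "interrupted, possibly resumable" error.
 */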
1831
1832static void
1833byteswap_record(dmu_replay_record_t *drr)
1834{
1835#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
1836#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
1837	drr->drr_type = BSWAP_32(drr->drr_type);
1838	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
1839
1840	switch (drr->drr_type) {
1841	case DRR_BEGIN:
1842		DO64(drr_begin.drr_magic);
1843		DO64(drr_begin.drr_versioninfo);
1844		DO64(drr_begin.drr_creation_time);
1845		DO32(drr_begin.drr_type);
1846		DO32(drr_begin.drr_flags);
1847		DO64(drr_begin.drr_toguid);
1848		DO64(drr_begin.drr_fromguid);
1849		break;
1850	case DRR_OBJECT:
1851		DO64(drr_object.drr_object);
1852		DO32(drr_object.drr_type);
1853		DO32(drr_object.drr_bonustype);
1854		DO32(drr_object.drr_blksz);
1855		DO32(drr_object.drr_bonuslen);
1856		DO64(drr_object.drr_toguid);
1857		break;
1858	case DRR_FREEOBJECTS:
1859		DO64(drr_freeobjects.drr_firstobj);
1860		DO64(drr_freeobjects.drr_numobjs);
1861		DO64(drr_freeobjects.drr_toguid);
1862		break;
1863	case DRR_WRITE:
1864		DO64(drr_write.drr_object);
1865		DO32(drr_write.drr_type);
1866		DO64(drr_write.drr_offset);
1867		DO64(drr_write.drr_length);
1868		DO64(drr_write.drr_toguid);
1869		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
1870		DO64(drr_write.drr_key.ddk_prop);
1871		break;
1872	case DRR_WRITE_BYREF:
1873		DO64(drr_write_byref.drr_object);
1874		DO64(drr_write_byref.drr_offset);
1875		DO64(drr_write_byref.drr_length);
1876		DO64(drr_write_byref.drr_toguid);
1877		DO64(drr_write_byref.drr_refguid);
1878		DO64(drr_write_byref.drr_refobject);
1879		DO64(drr_write_byref.drr_refoffset);
1880		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
1881		    drr_key.ddk_cksum);
1882		DO64(drr_write_byref.drr_key.ddk_prop);
1883		break;
1884	case DRR_WRITE_EMBEDDED:
1885		DO64(drr_write_embedded.drr_object);
1886		DO64(drr_write_embedded.drr_offset);
1887		DO64(drr_write_embedded.drr_length);
1888		DO64(drr_write_embedded.drr_toguid);
1889		DO32(drr_write_embedded.drr_lsize);
1890		DO32(drr_write_embedded.drr_psize);
1891		break;
1892	case DRR_FREE:
1893		DO64(drr_free.drr_object);
1894		DO64(drr_free.drr_offset);
1895		DO64(drr_free.drr_length);
1896		DO64(drr_free.drr_toguid);
1897		break;
1898	case DRR_SPILL:
1899		DO64(drr_spill.drr_object);
1900		DO64(drr_spill.drr_length);
1901		DO64(drr_spill.drr_toguid);
1902		break;
1903	case DRR_END:
1904		DO64(drr_end.drr_toguid);
1905		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
1906		break;
1907	}
1908
1909	if (drr->drr_type != DRR_BEGIN) {
1910		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
1911	}
1912
1913#undef DO64
1914#undef DO32
1915}
1916
1917static inline uint8_t
1918deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
1919{
1920	if (bonus_type == DMU_OT_SA) {
1921		return (1);
1922	} else {
1923		return (1 +
1924		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
1925	}
1926}
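/*
 * Worked example, assuming the classic 512-byte dnode layout in which
 * DN_MAX_BONUSLEN is 320 and a block pointer is 128 bytes
 * (SPA_BLKPTRSHIFT == 7):
 *
 *	bonus_size == 320  ->  1 + (  0 >> 7) == 1 blkptr
 *	bonus_size ==  64  ->  1 + (256 >> 7) == 3 blkptrs
 *
 * A DMU_OT_SA bonus buffer always leaves exactly one blkptr, since SA
 * overflow goes to a spill block rather than into the dnode.
 */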
1927
1928static void
1929save_resume_state(struct receive_writer_arg *rwa,
1930    uint64_t object, uint64_t offset, dmu_tx_t *tx)
1931{
1932	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1933
1934	if (!rwa->resumable)
1935		return;
1936
1937	/*
1938	 * We use ds_resume_bytes[] != 0 to indicate that we need to
1939	 * update this on disk, so it must not be 0.
1940	 */
1941	ASSERT(rwa->bytes_read != 0);
1942
1943	/*
1944	 * We only resume from write records, which have a valid
1945	 * (non-meta-dnode) object number.
1946	 */
1947	ASSERT(object != 0);
1948
1949	/*
1950	 * For resuming to work correctly, we must receive records in order,
1951	 * sorted by object,offset.  This is checked by the callers, but
1952	 * assert it here for good measure.
1953	 */
1954	ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
1955	ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
1956	    offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
1957	ASSERT3U(rwa->bytes_read, >=,
1958	    rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
1959
1960	rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
1961	rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
1962	rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
1963}
1964
1965static int
1966receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1967    void *data)
1968{
1969	dmu_object_info_t doi;
1970	dmu_tx_t *tx;
1971	uint64_t object;
1972	int err;
1973
1974	if (drro->drr_type == DMU_OT_NONE ||
1975	    !DMU_OT_IS_VALID(drro->drr_type) ||
1976	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1977	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1978	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1979	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1980	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
1981	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1982	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1983		return (SET_ERROR(EINVAL));
1984	}
1985
1986	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1987
1988	if (err != 0 && err != ENOENT)
1989		return (SET_ERROR(EINVAL));
1990	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1991
1992	/*
1993	 * If we are losing blkptrs or changing the block size this must
1994	 * be a new file instance.  We must clear out the previous file
1995	 * contents before we can change this type of metadata in the dnode.
1996	 */
1997	if (err == 0) {
1998		int nblkptr;
1999
2000		nblkptr = deduce_nblkptr(drro->drr_bonustype,
2001		    drro->drr_bonuslen);
2002
2003		if (drro->drr_blksz != doi.doi_data_block_size ||
2004		    nblkptr < doi.doi_nblkptr) {
2005			err = dmu_free_long_range(rwa->os, drro->drr_object,
2006			    0, DMU_OBJECT_END);
2007			if (err != 0)
2008				return (SET_ERROR(EINVAL));
2009		}
2010	}
2011
2012	tx = dmu_tx_create(rwa->os);
2013	dmu_tx_hold_bonus(tx, object);
2014	err = dmu_tx_assign(tx, TXG_WAIT);
2015	if (err != 0) {
2016		dmu_tx_abort(tx);
2017		return (err);
2018	}
2019
2020	if (object == DMU_NEW_OBJECT) {
2021		/* currently free, want to be allocated */
2022		err = dmu_object_claim(rwa->os, drro->drr_object,
2023		    drro->drr_type, drro->drr_blksz,
2024		    drro->drr_bonustype, drro->drr_bonuslen, tx);
2025	} else if (drro->drr_type != doi.doi_type ||
2026	    drro->drr_blksz != doi.doi_data_block_size ||
2027	    drro->drr_bonustype != doi.doi_bonus_type ||
2028	    drro->drr_bonuslen != doi.doi_bonus_size) {
2029		/* currently allocated, but with different properties */
2030		err = dmu_object_reclaim(rwa->os, drro->drr_object,
2031		    drro->drr_type, drro->drr_blksz,
2032		    drro->drr_bonustype, drro->drr_bonuslen, tx);
2033	}
2034	if (err != 0) {
2035		dmu_tx_commit(tx);
2036		return (SET_ERROR(EINVAL));
2037	}
2038
2039	dmu_object_set_checksum(rwa->os, drro->drr_object,
2040	    drro->drr_checksumtype, tx);
2041	dmu_object_set_compress(rwa->os, drro->drr_object,
2042	    drro->drr_compress, tx);
2043
2044	if (data != NULL) {
2045		dmu_buf_t *db;
2046
2047		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
2048		dmu_buf_will_dirty(db, tx);
2049
2050		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
2051		bcopy(data, db->db_data, drro->drr_bonuslen);
2052		if (rwa->byteswap) {
2053			dmu_object_byteswap_t byteswap =
2054			    DMU_OT_BYTESWAP(drro->drr_bonustype);
2055			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
2056			    drro->drr_bonuslen);
2057		}
2058		dmu_buf_rele(db, FTAG);
2059	}
2060	dmu_tx_commit(tx);
2061
2062	return (0);
2063}
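/*
 * The three cases above, with an invented object number:
 *
 *	- object 37 absent on the receiving side: claimed fresh with
 *	  dmu_object_claim();
 *	- object 37 present but the stream shrinks nblkptr or changes the
 *	  block size: old contents freed, then dmu_object_reclaim();
 *	- object 37 present with identical properties: neither call is
 *	  made, and only the checksum/compression/bonus updates apply.
 */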
2064
2065/* ARGSUSED */
2066static int
2067receive_freeobjects(struct receive_writer_arg *rwa,
2068    struct drr_freeobjects *drrfo)
2069{
2070	uint64_t obj;
2071
2072	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
2073		return (SET_ERROR(EINVAL));
2074
2075	for (obj = drrfo->drr_firstobj;
2076	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
2077	    (void) dmu_object_next(rwa->os, &obj, FALSE, 0)) {
2078		int err;
2079
2080		if (dmu_object_info(rwa->os, obj, NULL) != 0)
2081			continue;
2082
2083		err = dmu_free_long_object(rwa->os, obj);
2084		if (err != 0)
2085			return (err);
2086	}
2087
2088	return (0);
2089}
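/*
 * For example (numbers invented): a DRR_FREEOBJECTS record with
 * drr_firstobj == 10 and drr_numobjs == 4 frees whichever of objects
 * 10..13 are currently allocated; dmu_object_next() skips the holes.
 */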
2090
2091static int
2092receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
2093    arc_buf_t *abuf)
2094{
2095	dmu_tx_t *tx;
2096	int err;
2097
2098	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
2099	    !DMU_OT_IS_VALID(drrw->drr_type))
2100		return (SET_ERROR(EINVAL));
2101
2102	/*
2103	 * For resuming to work, records must be in increasing order
2104	 * by (object, offset).
2105	 */
2106	if (drrw->drr_object < rwa->last_object ||
2107	    (drrw->drr_object == rwa->last_object &&
2108	    drrw->drr_offset < rwa->last_offset)) {
2109		return (SET_ERROR(EINVAL));
2110	}
2111	rwa->last_object = drrw->drr_object;
2112	rwa->last_offset = drrw->drr_offset;
2113
2114	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
2115		return (SET_ERROR(EINVAL));
2116
2117	tx = dmu_tx_create(rwa->os);
2118
2119	dmu_tx_hold_write(tx, drrw->drr_object,
2120	    drrw->drr_offset, drrw->drr_length);
2121	err = dmu_tx_assign(tx, TXG_WAIT);
2122	if (err != 0) {
2123		dmu_tx_abort(tx);
2124		return (err);
2125	}
2126	if (rwa->byteswap) {
2127		dmu_object_byteswap_t byteswap =
2128		    DMU_OT_BYTESWAP(drrw->drr_type);
2129		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
2130		    drrw->drr_length);
2131	}
2132
	dmu_buf_t *bonus;
	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
		/* The tx is already assigned; commit it so the txg can close. */
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}
2136	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
2137
2138	/*
2139	 * Note: If the receive fails, we want the resume stream to start
2140	 * with the same record that we last successfully received (as opposed
2141	 * to the next record), so that we can verify that we are
2142	 * resuming from the correct location.
2143	 */
2144	save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
2145	dmu_tx_commit(tx);
2146	dmu_buf_rele(bonus, FTAG);
2147
2148	return (0);
2149}
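/*
 * Sketch of the ordering rule enforced above, with invented values:
 * a stream carrying writes
 *
 *	(obj 5, off 0), (obj 5, off 131072), (obj 7, off 0)
 *
 * is accepted, while (obj 7, off 0) followed by (obj 5, off 0) fails
 * with EINVAL, because save_resume_state() keeps only a single
 * (object, offset) high-water mark.
 */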
2150
2151/*
2152 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
2153 * streams to refer to a copy of the data that is already on the
2154 * system because it came in earlier in the stream.  This function
2155 * finds the earlier copy of the data, and uses that copy instead of
2156 * data from the stream to fulfill this write.
2157 */
2158static int
2159receive_write_byref(struct receive_writer_arg *rwa,
2160    struct drr_write_byref *drrwbr)
2161{
2162	dmu_tx_t *tx;
2163	int err;
2164	guid_map_entry_t gmesrch;
2165	guid_map_entry_t *gmep;
2166	avl_index_t where;
2167	objset_t *ref_os = NULL;
2168	dmu_buf_t *dbp;
2169
2170	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
2171		return (SET_ERROR(EINVAL));
2172
2173	/*
2174	 * If the GUID of the referenced dataset is different from the
2175	 * GUID of the target dataset, find the referenced dataset.
2176	 */
2177	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
2178		gmesrch.guid = drrwbr->drr_refguid;
2179		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
2180		    &where)) == NULL) {
2181			return (SET_ERROR(EINVAL));
2182		}
2183		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
2184			return (SET_ERROR(EINVAL));
2185	} else {
2186		ref_os = rwa->os;
2187	}
2188
2189	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
2190	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
2191	if (err != 0)
2192		return (err);
2193
2194	tx = dmu_tx_create(rwa->os);
2195
2196	dmu_tx_hold_write(tx, drrwbr->drr_object,
2197	    drrwbr->drr_offset, drrwbr->drr_length);
2198	err = dmu_tx_assign(tx, TXG_WAIT);
2199	if (err != 0) {
2200		dmu_tx_abort(tx);
2201		return (err);
2202	}
2203	dmu_write(rwa->os, drrwbr->drr_object,
2204	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
2205	dmu_buf_rele(dbp, FTAG);
2206
	/* See comment in receive_write(). */
2208	save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
2209	dmu_tx_commit(tx);
2210	return (0);
2211}
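/*
 * A dedup'd stream might look like this (guids and numbers invented):
 *
 *	DRR_WRITE	toguid T1, object 5, offset 0, <payload>
 *	...
 *	DRR_WRITE_BYREF	toguid T2, refguid T1, refobject 5, refoffset 0
 *
 * The byref record carries no payload; the data is re-read from the
 * dataset located through the guid map (or from the target dataset
 * itself when the two guids match).
 */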
2212
2213static int
2214receive_write_embedded(struct receive_writer_arg *rwa,
2215    struct drr_write_embedded *drrwe, void *data)
2216{
2217	dmu_tx_t *tx;
2218	int err;
2219
	if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
		return (SET_ERROR(EINVAL));

	if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
		return (SET_ERROR(EINVAL));
	if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
		return (SET_ERROR(EINVAL));
2230
2231	tx = dmu_tx_create(rwa->os);
2232
2233	dmu_tx_hold_write(tx, drrwe->drr_object,
2234	    drrwe->drr_offset, drrwe->drr_length);
2235	err = dmu_tx_assign(tx, TXG_WAIT);
2236	if (err != 0) {
2237		dmu_tx_abort(tx);
2238		return (err);
2239	}
2240
2241	dmu_write_embedded(rwa->os, drrwe->drr_object,
2242	    drrwe->drr_offset, data, drrwe->drr_etype,
2243	    drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
2244	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
2245
	/* See comment in receive_write(). */
2247	save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
2248	dmu_tx_commit(tx);
2249	return (0);
2250}
2251
2252static int
2253receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
2254    void *data)
2255{
2256	dmu_tx_t *tx;
2257	dmu_buf_t *db, *db_spill;
2258	int err;
2259
2260	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
2261	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
2262		return (SET_ERROR(EINVAL));
2263
2264	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
2265		return (SET_ERROR(EINVAL));
2266
2267	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
2268	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
2269		dmu_buf_rele(db, FTAG);
2270		return (err);
2271	}
2272
2273	tx = dmu_tx_create(rwa->os);
2274
2275	dmu_tx_hold_spill(tx, db->db_object);
2276
2277	err = dmu_tx_assign(tx, TXG_WAIT);
2278	if (err != 0) {
2279		dmu_buf_rele(db, FTAG);
2280		dmu_buf_rele(db_spill, FTAG);
2281		dmu_tx_abort(tx);
2282		return (err);
2283	}
2284	dmu_buf_will_dirty(db_spill, tx);
2285
2286	if (db_spill->db_size < drrs->drr_length)
		VERIFY0(dbuf_spill_set_blksz(db_spill,
		    drrs->drr_length, tx));
2289	bcopy(data, db_spill->db_data, drrs->drr_length);
2290
2291	dmu_buf_rele(db, FTAG);
2292	dmu_buf_rele(db_spill, FTAG);
2293
2294	dmu_tx_commit(tx);
2295	return (0);
2296}
2297
2298/* ARGSUSED */
2299static int
2300receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
2301{
2302	int err;
2303
2304	if (drrf->drr_length != -1ULL &&
2305	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2306		return (SET_ERROR(EINVAL));
2307
2308	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2309		return (SET_ERROR(EINVAL));
2310
2311	err = dmu_free_long_range(rwa->os, drrf->drr_object,
2312	    drrf->drr_offset, drrf->drr_length);
2313
2314	return (err);
2315}
2316
2317/* used to destroy the drc_ds on error */
2318static void
2319dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2320{
2321	if (drc->drc_resumable) {
2322		/* wait for our resume state to be written to disk */
2323		txg_wait_synced(drc->drc_ds->ds_dir->dd_pool, 0);
2324		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2325	} else {
2326		char name[MAXNAMELEN];
2327		dsl_dataset_name(drc->drc_ds, name);
2328		dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2329		(void) dsl_destroy_head(name);
2330	}
2331}
2332
2333static void
2334receive_cksum(struct receive_arg *ra, int len, void *buf)
2335{
2336	if (ra->byteswap) {
2337		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2338	} else {
2339		fletcher_4_incremental_native(buf, len, &ra->cksum);
2340	}
2341}
2342
2343/*
2344 * Read the payload into a buffer of size len, and update the current record's
2345 * payload field.
2346 * Allocate ra->next_rrd and read the next record's header into
2347 * ra->next_rrd->header.
2348 * Verify checksum of payload and next record.
2349 */
2350static int
2351receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2352{
2353	int err;
2354
2355	if (len != 0) {
2356		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2357		err = receive_read(ra, len, buf);
2358		if (err != 0)
2359			return (err);
2360		receive_cksum(ra, len, buf);
2361
2362		/* note: rrd is NULL when reading the begin record's payload */
2363		if (ra->rrd != NULL) {
2364			ra->rrd->payload = buf;
2365			ra->rrd->payload_size = len;
2366			ra->rrd->bytes_read = ra->bytes_read;
2367		}
2368	}
2369
2370	ra->prev_cksum = ra->cksum;
2371
2372	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2373	err = receive_read(ra, sizeof (ra->next_rrd->header),
2374	    &ra->next_rrd->header);
2375	ra->next_rrd->bytes_read = ra->bytes_read;
2376	if (err != 0) {
2377		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2378		ra->next_rrd = NULL;
2379		return (err);
2380	}
2381	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2382		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2383		ra->next_rrd = NULL;
2384		return (SET_ERROR(EINVAL));
2385	}
2386
2387	/*
2388	 * Note: checksum is of everything up to but not including the
2389	 * checksum itself.
2390	 */
2391	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2392	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2393	receive_cksum(ra,
2394	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2395	    &ra->next_rrd->header);
2396
2397	zio_cksum_t cksum_orig =
2398	    ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2399	zio_cksum_t *cksump =
2400	    &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2401
2402	if (ra->byteswap)
2403		byteswap_record(&ra->next_rrd->header);
2404
2405	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2406	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2407		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2408		ra->next_rrd = NULL;
2409		return (SET_ERROR(ECKSUM));
2410	}
2411
2412	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
2413
2414	return (0);
2415}
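/*
 * Rough picture of the stream framing consumed above:
 *
 *	... | header N | payload N | header N+1 | payload N+1 | ...
 *
 * The checksum embedded in header N+1 covers every byte of the stream
 * before the checksum field itself, which is why that field is folded
 * back into the running checksum only after the comparison.
 */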
2416
2417/*
2418 * Issue the prefetch reads for any necessary indirect blocks.
2419 *
2420 * We use the object ignore list to tell us whether or not to issue prefetches
2421 * for a given object.  We do this for both correctness (in case the blocksize
2422 * of an object has changed) and performance (if the object doesn't exist, don't
2423 * needlessly try to issue prefetches).  We also trim the list as we go through
2424 * the stream to prevent it from growing to an unbounded size.
2425 *
2426 * The object numbers within will always be in sorted order, and any write
2427 * records we see will also be in sorted order, but they're not sorted with
2428 * respect to each other (i.e. we can get several object records before
2429 * receiving each object's write records).  As a result, once we've reached a
2430 * given object number, we can safely remove any reference to lower object
2431 * numbers in the ignore list. In practice, we receive up to 32 object records
2432 * before receiving write records, so the list can have up to 32 nodes in it.
2433 */
2434/* ARGSUSED */
2435static void
2436receive_read_prefetch(struct receive_arg *ra,
2437    uint64_t object, uint64_t offset, uint64_t length)
2438{
2439	struct receive_ign_obj_node *node = list_head(&ra->ignore_obj_list);
2440	while (node != NULL && node->object < object) {
2441		VERIFY3P(node, ==, list_remove_head(&ra->ignore_obj_list));
2442		kmem_free(node, sizeof (*node));
2443		node = list_head(&ra->ignore_obj_list);
2444	}
2445	if (node == NULL || node->object > object) {
2446		dmu_prefetch(ra->os, object, 1, offset, length,
2447		    ZIO_PRIORITY_SYNC_READ);
2448	}
2449}
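/*
 * Example of the trimming above, with invented object numbers: given
 * ignore list {4, 9}, a write to object 6 discards node 4, keeps node 9,
 * and issues the prefetch (6 is not in the list); a later write to
 * object 9 is then skipped without prefetching.
 */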
2450
2451/*
2452 * Read records off the stream, issuing any necessary prefetches.
2453 */
2454static int
2455receive_read_record(struct receive_arg *ra)
2456{
2457	int err;
2458
2459	switch (ra->rrd->header.drr_type) {
2460	case DRR_OBJECT:
2461	{
2462		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2463		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2464		void *buf = kmem_zalloc(size, KM_SLEEP);
2465		dmu_object_info_t doi;
2466		err = receive_read_payload_and_next_header(ra, size, buf);
2467		if (err != 0) {
2468			kmem_free(buf, size);
2469			return (err);
2470		}
2471		err = dmu_object_info(ra->os, drro->drr_object, &doi);
2472		/*
2473		 * See receive_read_prefetch for an explanation why we're
2474		 * storing this object in the ignore_obj_list.
2475		 */
2476		if (err == ENOENT ||
2477		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2478			struct receive_ign_obj_node *node =
2479			    kmem_zalloc(sizeof (*node),
2480			    KM_SLEEP);
2481			node->object = drro->drr_object;
2482#ifdef ZFS_DEBUG
2483			struct receive_ign_obj_node *last_object =
2484			    list_tail(&ra->ignore_obj_list);
2485			uint64_t last_objnum = (last_object != NULL ?
2486			    last_object->object : 0);
2487			ASSERT3U(node->object, >, last_objnum);
2488#endif
2489			list_insert_tail(&ra->ignore_obj_list, node);
2490			err = 0;
2491		}
2492		return (err);
2493	}
2494	case DRR_FREEOBJECTS:
2495	{
2496		err = receive_read_payload_and_next_header(ra, 0, NULL);
2497		return (err);
2498	}
2499	case DRR_WRITE:
2500	{
2501		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2502		arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2503		    drrw->drr_length);
2504
2505		err = receive_read_payload_and_next_header(ra,
2506		    drrw->drr_length, abuf->b_data);
2507		if (err != 0) {
2508			dmu_return_arcbuf(abuf);
2509			return (err);
2510		}
2511		ra->rrd->write_buf = abuf;
2512		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2513		    drrw->drr_length);
2514		return (err);
2515	}
2516	case DRR_WRITE_BYREF:
2517	{
2518		struct drr_write_byref *drrwb =
2519		    &ra->rrd->header.drr_u.drr_write_byref;
2520		err = receive_read_payload_and_next_header(ra, 0, NULL);
2521		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2522		    drrwb->drr_length);
2523		return (err);
2524	}
2525	case DRR_WRITE_EMBEDDED:
2526	{
2527		struct drr_write_embedded *drrwe =
2528		    &ra->rrd->header.drr_u.drr_write_embedded;
2529		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2530		void *buf = kmem_zalloc(size, KM_SLEEP);
2531
2532		err = receive_read_payload_and_next_header(ra, size, buf);
2533		if (err != 0) {
2534			kmem_free(buf, size);
2535			return (err);
2536		}
2537
2538		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2539		    drrwe->drr_length);
2540		return (err);
2541	}
2542	case DRR_FREE:
2543	{
2544		/*
2545		 * It might be beneficial to prefetch indirect blocks here, but
2546		 * we don't really have the data to decide for sure.
2547		 */
2548		err = receive_read_payload_and_next_header(ra, 0, NULL);
2549		return (err);
2550	}
2551	case DRR_END:
2552	{
2553		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2554		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2555			return (SET_ERROR(ECKSUM));
2556		return (0);
2557	}
2558	case DRR_SPILL:
2559	{
2560		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2561		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
2562		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
2563		    buf);
2564		if (err != 0)
2565			kmem_free(buf, drrs->drr_length);
2566		return (err);
2567	}
2568	default:
2569		return (SET_ERROR(EINVAL));
2570	}
2571}
2572
2573/*
2574 * Commit the records to the pool.
2575 */
2576static int
2577receive_process_record(struct receive_writer_arg *rwa,
2578    struct receive_record_arg *rrd)
2579{
2580	int err;
2581
2582	/* Processing in order, therefore bytes_read should be increasing. */
2583	ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
2584	rwa->bytes_read = rrd->bytes_read;
2585
2586	switch (rrd->header.drr_type) {
2587	case DRR_OBJECT:
2588	{
2589		struct drr_object *drro = &rrd->header.drr_u.drr_object;
2590		err = receive_object(rwa, drro, rrd->payload);
2591		kmem_free(rrd->payload, rrd->payload_size);
2592		rrd->payload = NULL;
2593		return (err);
2594	}
2595	case DRR_FREEOBJECTS:
2596	{
2597		struct drr_freeobjects *drrfo =
2598		    &rrd->header.drr_u.drr_freeobjects;
2599		return (receive_freeobjects(rwa, drrfo));
2600	}
2601	case DRR_WRITE:
2602	{
2603		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2604		err = receive_write(rwa, drrw, rrd->write_buf);
2605		/* if receive_write() is successful, it consumes the arc_buf */
2606		if (err != 0)
2607			dmu_return_arcbuf(rrd->write_buf);
2608		rrd->write_buf = NULL;
2609		rrd->payload = NULL;
2610		return (err);
2611	}
2612	case DRR_WRITE_BYREF:
2613	{
2614		struct drr_write_byref *drrwbr =
2615		    &rrd->header.drr_u.drr_write_byref;
2616		return (receive_write_byref(rwa, drrwbr));
2617	}
2618	case DRR_WRITE_EMBEDDED:
2619	{
2620		struct drr_write_embedded *drrwe =
2621		    &rrd->header.drr_u.drr_write_embedded;
2622		err = receive_write_embedded(rwa, drrwe, rrd->payload);
2623		kmem_free(rrd->payload, rrd->payload_size);
2624		rrd->payload = NULL;
2625		return (err);
2626	}
2627	case DRR_FREE:
2628	{
2629		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2630		return (receive_free(rwa, drrf));
2631	}
2632	case DRR_SPILL:
2633	{
2634		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2635		err = receive_spill(rwa, drrs, rrd->payload);
2636		kmem_free(rrd->payload, rrd->payload_size);
2637		rrd->payload = NULL;
2638		return (err);
2639	}
2640	default:
2641		return (SET_ERROR(EINVAL));
2642	}
2643}
2644
2645/*
2646 * dmu_recv_stream's worker thread; pull records off the queue, and then call
 * receive_process_record().  When we're done, signal the main thread and exit.
2648 */
2649static void
2650receive_writer_thread(void *arg)
2651{
2652	struct receive_writer_arg *rwa = arg;
2653	struct receive_record_arg *rrd;
2654	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2655	    rrd = bqueue_dequeue(&rwa->q)) {
2656		/*
2657		 * If there's an error, the main thread will stop putting things
2658		 * on the queue, but we need to clear everything in it before we
2659		 * can exit.
2660		 */
2661		if (rwa->err == 0) {
2662			rwa->err = receive_process_record(rwa, rrd);
2663		} else if (rrd->write_buf != NULL) {
2664			dmu_return_arcbuf(rrd->write_buf);
2665			rrd->write_buf = NULL;
2666			rrd->payload = NULL;
2667		} else if (rrd->payload != NULL) {
2668			kmem_free(rrd->payload, rrd->payload_size);
2669			rrd->payload = NULL;
2670		}
2671		kmem_free(rrd, sizeof (*rrd));
2672	}
2673	kmem_free(rrd, sizeof (*rrd));
2674	mutex_enter(&rwa->mutex);
2675	rwa->done = B_TRUE;
2676	cv_signal(&rwa->cv);
2677	mutex_exit(&rwa->mutex);
2678	thread_exit();
2679}
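/*
 * Shape of the two-thread pipeline (see dmu_recv_stream() below):
 *
 *	main thread				writer thread (above)
 *	-----------				---------------------
 *	receive_read_record():			bqueue_dequeue(&rwa->q)
 *	  read header + payload,   --rrd-->	receive_process_record()
 *	  prefetch indirects			  applies record via DMU
 *	enqueue eos_marker	   ------->	set done, cv_signal, exit
 */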
2680
2681static int
2682resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
2683{
2684	uint64_t val;
2685	objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
2686	uint64_t dsobj = dmu_objset_id(ra->os);
2687	uint64_t resume_obj, resume_off;
2688
2689	if (nvlist_lookup_uint64(begin_nvl,
2690	    "resume_object", &resume_obj) != 0 ||
2691	    nvlist_lookup_uint64(begin_nvl,
2692	    "resume_offset", &resume_off) != 0) {
2693		return (SET_ERROR(EINVAL));
2694	}
2695	VERIFY0(zap_lookup(mos, dsobj,
2696	    DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
2697	if (resume_obj != val)
2698		return (SET_ERROR(EINVAL));
2699	VERIFY0(zap_lookup(mos, dsobj,
2700	    DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
2701	if (resume_off != val)
2702		return (SET_ERROR(EINVAL));
2703
2704	return (0);
2705}
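/*
 * For a resumed stream, the DRR_BEGIN payload is a packed nvlist whose
 * relevant members look conceptually like (values invented)
 *
 *	resume_object = 143
 *	resume_offset = 1048576
 *
 * and must match the DS_FIELD_RESUME_* values previously recorded on
 * disk for the partially received dataset (cf. save_resume_state()).
 */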
2706
2707
2708/*
2709 * Read in the stream's records, one by one, and apply them to the pool.  There
2710 * are two threads involved; the thread that calls this function will spin up a
2711 * worker thread, read the records off the stream one by one, and issue
2712 * prefetches for any necessary indirect blocks.  It will then push the records
2713 * onto an internal blocking queue.  The worker thread will pull the records off
2714 * the queue, and actually write the data into the DMU.  This way, the worker
2715 * thread doesn't have to wait for reads to complete, since everything it needs
2716 * (the indirect blocks) will be prefetched.
2717 *
2718 * NB: callers *must* call dmu_recv_end() if this succeeds.
2719 */
2720int
2721dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
2722    int cleanup_fd, uint64_t *action_handlep)
2723{
2724	int err = 0;
2725	struct receive_arg ra = { 0 };
2726	struct receive_writer_arg rwa = { 0 };
2727	int featureflags;
2728	nvlist_t *begin_nvl = NULL;
2729
2730	ra.byteswap = drc->drc_byteswap;
2731	ra.cksum = drc->drc_cksum;
2732	ra.td = curthread;
2733	ra.fp = fp;
2734	ra.voff = *voffp;
2735
2736	if (dsl_dataset_is_zapified(drc->drc_ds)) {
2737		(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
2738		    drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
2739		    sizeof (ra.bytes_read), 1, &ra.bytes_read);
2740	}
2741
2742	list_create(&ra.ignore_obj_list, sizeof (struct receive_ign_obj_node),
2743	    offsetof(struct receive_ign_obj_node, node));
2744
2745	/* these were verified in dmu_recv_begin */
2746	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2747	    DMU_SUBSTREAM);
2748	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2749
2750	/*
2751	 * Open the objset we are modifying.
2752	 */
2753	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2754
2755	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2756
2757	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2758
2759	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
2760	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2761		minor_t minor;
2762
2763		if (cleanup_fd == -1) {
			err = SET_ERROR(EBADF);
2765			goto out;
2766		}
		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (err != 0) {
2769			cleanup_fd = -1;
2770			goto out;
2771		}
2772
2773		if (*action_handlep == 0) {
2774			rwa.guid_to_ds_map =
2775			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2776			avl_create(rwa.guid_to_ds_map, guid_compare,
2777			    sizeof (guid_map_entry_t),
2778			    offsetof(guid_map_entry_t, avlnode));
2779			err = zfs_onexit_add_cb(minor,
2780			    free_guid_map_onexit, rwa.guid_to_ds_map,
2781			    action_handlep);
			if (err != 0)
2783				goto out;
2784		} else {
2785			err = zfs_onexit_cb_data(minor, *action_handlep,
2786			    (void **)&rwa.guid_to_ds_map);
			if (err != 0)
2788				goto out;
2789		}
2790
2791		drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
2792	}
2793
2794	uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
2795	void *payload = NULL;
2796	if (payloadlen != 0)
2797		payload = kmem_alloc(payloadlen, KM_SLEEP);
2798
2799	err = receive_read_payload_and_next_header(&ra, payloadlen, payload);
2800	if (err != 0) {
2801		if (payloadlen != 0)
2802			kmem_free(payload, payloadlen);
2803		goto out;
2804	}
2805	if (payloadlen != 0) {
2806		err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
2807		kmem_free(payload, payloadlen);
2808		if (err != 0)
2809			goto out;
2810	}
2811
2812	if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
2813		err = resume_check(&ra, begin_nvl);
2814		if (err != 0)
2815			goto out;
2816	}
2817
2818	(void) bqueue_init(&rwa.q, zfs_recv_queue_length,
2819	    offsetof(struct receive_record_arg, node));
2820	cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
2821	mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
2822	rwa.os = ra.os;
2823	rwa.byteswap = drc->drc_byteswap;
2824	rwa.resumable = drc->drc_resumable;
2825
2826	(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, &p0,
2827	    TS_RUN, minclsyspri);
2828	/*
2829	 * We're reading rwa.err without locks, which is safe since we are the
2830	 * only reader, and the worker thread is the only writer.  It's ok if we
2831	 * miss a write for an iteration or two of the loop, since the writer
2832	 * thread will keep freeing records we send it until we send it an eos
2833	 * marker.
2834	 *
2835	 * We can leave this loop in 3 ways:  First, if rwa.err is
2836	 * non-zero.  In that case, the writer thread will free the rrd we just
	 * pushed.  Second, if we're interrupted; in that case, either it's the
2838	 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2839	 * has been handed off to the writer thread who will free it.  Finally,
2840	 * if receive_read_record fails or we're at the end of the stream, then
2841	 * we free ra.rrd and exit.
2842	 */
2843	while (rwa.err == 0) {
2844		if (issig(JUSTLOOKING) && issig(FORREAL)) {
2845			err = SET_ERROR(EINTR);
2846			break;
2847		}
2848
2849		ASSERT3P(ra.rrd, ==, NULL);
2850		ra.rrd = ra.next_rrd;
2851		ra.next_rrd = NULL;
2852		/* Allocates and loads header into ra.next_rrd */
2853		err = receive_read_record(&ra);
2854
2855		if (ra.rrd->header.drr_type == DRR_END || err != 0) {
2856			kmem_free(ra.rrd, sizeof (*ra.rrd));
2857			ra.rrd = NULL;
2858			break;
2859		}
2860
2861		bqueue_enqueue(&rwa.q, ra.rrd,
2862		    sizeof (struct receive_record_arg) + ra.rrd->payload_size);
2863		ra.rrd = NULL;
2864	}
2865	if (ra.next_rrd == NULL)
2866		ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
2867	ra.next_rrd->eos_marker = B_TRUE;
2868	bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
2869
2870	mutex_enter(&rwa.mutex);
2871	while (!rwa.done) {
2872		cv_wait(&rwa.cv, &rwa.mutex);
2873	}
2874	mutex_exit(&rwa.mutex);
2875
2876	cv_destroy(&rwa.cv);
2877	mutex_destroy(&rwa.mutex);
2878	bqueue_destroy(&rwa.q);
2879	if (err == 0)
2880		err = rwa.err;
2881
2882out:
2883	nvlist_free(begin_nvl);
2884	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2885		zfs_onexit_fd_rele(cleanup_fd);
2886
2887	if (err != 0) {
2888		/*
2889		 * Clean up references. If receive is not resumable,
2890		 * destroy what we created, so we don't leave it in
2891		 * the inconsistent state.
2892		 */
2893		dmu_recv_cleanup_ds(drc);
2894	}
2895
2896	*voffp = ra.voff;
2897	for (struct receive_ign_obj_node *n =
2898	    list_remove_head(&ra.ignore_obj_list); n != NULL;
2899	    n = list_remove_head(&ra.ignore_obj_list)) {
2900		kmem_free(n, sizeof (*n));
2901	}
2902	list_destroy(&ra.ignore_obj_list);
2903	return (err);
2904}
2905
2906static int
2907dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2908{
2909	dmu_recv_cookie_t *drc = arg;
2910	dsl_pool_t *dp = dmu_tx_pool(tx);
2911	int error;
2912
2913	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2914
2915	if (!drc->drc_newfs) {
2916		dsl_dataset_t *origin_head;
2917
2918		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2919		if (error != 0)
2920			return (error);
2921		if (drc->drc_force) {
2922			/*
2923			 * We will destroy any snapshots in tofs (i.e. before
2924			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds cannot
2926			 * have any snaps of its own).
2927			 */
2928			uint64_t obj;
2929
2930			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2931			while (obj !=
2932			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2933				dsl_dataset_t *snap;
2934				error = dsl_dataset_hold_obj(dp, obj, FTAG,
2935				    &snap);
2936				if (error != 0)
2937					break;
2938				if (snap->ds_dir != origin_head->ds_dir)
2939					error = SET_ERROR(EINVAL);
2940				if (error == 0)  {
2941					error = dsl_destroy_snapshot_check_impl(
2942					    snap, B_FALSE);
2943				}
2944				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2945				dsl_dataset_rele(snap, FTAG);
2946				if (error != 0)
2947					break;
2948			}
2949			if (error != 0) {
2950				dsl_dataset_rele(origin_head, FTAG);
2951				return (error);
2952			}
2953		}
2954		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2955		    origin_head, drc->drc_force, drc->drc_owner, tx);
2956		if (error != 0) {
2957			dsl_dataset_rele(origin_head, FTAG);
2958			return (error);
2959		}
2960		error = dsl_dataset_snapshot_check_impl(origin_head,
2961		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2962		dsl_dataset_rele(origin_head, FTAG);
2963		if (error != 0)
2964			return (error);
2965
2966		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2967	} else {
2968		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2969		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2970	}
2971	return (error);
2972}
2973
2974static void
2975dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2976{
2977	dmu_recv_cookie_t *drc = arg;
2978	dsl_pool_t *dp = dmu_tx_pool(tx);
2979
2980	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2981	    tx, "snap=%s", drc->drc_tosnap);
2982
2983	if (!drc->drc_newfs) {
2984		dsl_dataset_t *origin_head;
2985
2986		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2987		    &origin_head));
2988
2989		if (drc->drc_force) {
2990			/*
2991			 * Destroy any snapshots of drc_tofs (origin_head)
2992			 * after the origin (the snap before drc_ds).
2993			 */
2994			uint64_t obj;
2995
2996			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2997			while (obj !=
2998			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2999				dsl_dataset_t *snap;
3000				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
3001				    &snap));
3002				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
3003				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
3004				dsl_destroy_snapshot_sync_impl(snap,
3005				    B_FALSE, tx);
3006				dsl_dataset_rele(snap, FTAG);
3007			}
3008		}
3009		VERIFY3P(drc->drc_ds->ds_prev, ==,
3010		    origin_head->ds_prev);
3011
3012		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
3013		    origin_head, tx);
3014		dsl_dataset_snapshot_sync_impl(origin_head,
3015		    drc->drc_tosnap, tx);
3016
3017		/* set snapshot's creation time and guid */
3018		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
3019		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
3020		    drc->drc_drrb->drr_creation_time;
3021		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
3022		    drc->drc_drrb->drr_toguid;
3023		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
3024		    ~DS_FLAG_INCONSISTENT;
3025
3026		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
3027		dsl_dataset_phys(origin_head)->ds_flags &=
3028		    ~DS_FLAG_INCONSISTENT;
3029
3030		dsl_dataset_rele(origin_head, FTAG);
3031		dsl_destroy_head_sync_impl(drc->drc_ds, tx);
3032
3033		if (drc->drc_owner != NULL)
3034			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
3035	} else {
3036		dsl_dataset_t *ds = drc->drc_ds;
3037
3038		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
3039
3040		/* set snapshot's creation time and guid */
3041		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
3042		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
3043		    drc->drc_drrb->drr_creation_time;
3044		dsl_dataset_phys(ds->ds_prev)->ds_guid =
3045		    drc->drc_drrb->drr_toguid;
3046		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
3047		    ~DS_FLAG_INCONSISTENT;
3048
3049		dmu_buf_will_dirty(ds->ds_dbuf, tx);
3050		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
3051		if (dsl_dataset_has_resume_receive_state(ds)) {
3052			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3053			    DS_FIELD_RESUME_FROMGUID, tx);
3054			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3055			    DS_FIELD_RESUME_OBJECT, tx);
3056			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3057			    DS_FIELD_RESUME_OFFSET, tx);
3058			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3059			    DS_FIELD_RESUME_BYTES, tx);
3060			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3061			    DS_FIELD_RESUME_TOGUID, tx);
3062			(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
3063			    DS_FIELD_RESUME_TONAME, tx);
3064		}
3065	}
3066	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
3067	/*
3068	 * Release the hold from dmu_recv_begin.  This must be done before
3069	 * we return to open context, so that when we free the dataset's dnode,
3070	 * we can evict its bonus buffer.
3071	 */
3072	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
3073	drc->drc_ds = NULL;
3074}
3075
3076static int
3077add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
3078{
3079	dsl_pool_t *dp;
3080	dsl_dataset_t *snapds;
3081	guid_map_entry_t *gmep;
3082	int err;
3083
3084	ASSERT(guid_map != NULL);
3085
3086	err = dsl_pool_hold(name, FTAG, &dp);
3087	if (err != 0)
3088		return (err);
3089	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
3090	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
3091	if (err == 0) {
3092		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
3093		gmep->gme_ds = snapds;
3094		avl_add(guid_map, gmep);
3095		dsl_dataset_long_hold(snapds, gmep);
3096	} else
3097		kmem_free(gmep, sizeof (*gmep));
3098
3099	dsl_pool_rele(dp, FTAG);
3100	return (err);
3101}
3102
3103static int dmu_recv_end_modified_blocks = 3;
3104
3105static int
3106dmu_recv_existing_end(dmu_recv_cookie_t *drc)
3107{
3108	int error;
3109	char name[MAXNAMELEN];
3110
3111#ifdef _KERNEL
3112	/*
3113	 * We will be destroying the ds; make sure its origin is unmounted if
3114	 * necessary.
3115	 */
3116	dsl_dataset_name(drc->drc_ds, name);
3117	zfs_destroy_unmount_origin(name);
3118#endif
3119
3120	error = dsl_sync_task(drc->drc_tofs,
3121	    dmu_recv_end_check, dmu_recv_end_sync, drc,
3122	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
3123
3124	if (error != 0)
3125		dmu_recv_cleanup_ds(drc);
3126	return (error);
3127}
3128
3129static int
3130dmu_recv_new_end(dmu_recv_cookie_t *drc)
3131{
3132	int error;
3133
3134	error = dsl_sync_task(drc->drc_tofs,
3135	    dmu_recv_end_check, dmu_recv_end_sync, drc,
3136	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
3137
3138	if (error != 0) {
3139		dmu_recv_cleanup_ds(drc);
3140	} else if (drc->drc_guid_to_ds_map != NULL) {
3141		(void) add_ds_to_guidmap(drc->drc_tofs,
3142		    drc->drc_guid_to_ds_map,
3143		    drc->drc_newsnapobj);
3144	}
3145	return (error);
3146}
3147
3148int
3149dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
3150{
3151	drc->drc_owner = owner;
3152
3153	if (drc->drc_newfs)
3154		return (dmu_recv_new_end(drc));
3155	else
3156		return (dmu_recv_existing_end(drc));
3157}
3158
3159/*
3160 * Return TRUE if this objset is currently being received into.
3161 */
3162boolean_t
3163dmu_objset_is_receiving(objset_t *os)
3164{
3165	return (os->os_dsl_dataset != NULL &&
3166	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
3167}
3168