dmu_send.c revision 284313
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>

#ifdef __FreeBSD__
#undef dump_write
#define dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";

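/*
 * Write len bytes to the stream's file descriptor, folding the data into
 * the running fletcher-4 checksum first so that the eventual DRR_END
 * record can carry a checksum of everything that preceded it.
 */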
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	struct uio auio;
	struct iovec aiov;
	ASSERT0(len % 8);

	fletcher_4_incremental_native(buf, len, &dsp->dsa_zc);
	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}

static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object+offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If we are doing a non-incremental send, then there can't
	 * be any data in the dataset we're receiving into.  Therefore
	 * a free record would simply be a no-op.  Save space by not
	 * sending it to begin with.
	 */
	if (!dsp->dsa_incremental)
		return (0);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records, and DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
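	/*
	 * Illustrative example (not taken from an actual stream): two
	 * back-to-back frees of object 5, one at offset 0 and one at
	 * offset 128K, each 128K long, are coalesced by the PENDING_FREE
	 * path below into a single 256K DRR_FREE record.
	 */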
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}

static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a DATA record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BPs, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

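/*
 * An embedded BP stores its (compressed) payload directly inside the block
 * pointer rather than on disk, so the record below carries the decoded
 * payload inline, rounded up to the stream's 8-byte alignment.
 */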
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (EINTR);
	if (dump_bytes(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (EINTR);
	return (0);
}

static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
		return (SET_ERROR(EINTR));
	if (dump_bytes(dsp, data, blksz))
		return (SET_ERROR(EINTR));
	return (0);
}

static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/* See comment in dump_free(). */
	if (!dsp->dsa_incremental)
		return (0);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records, and DRR_FREEOBJECTS
	 * records can only be aggregated with other DRR_FREEOBJECTS
	 * records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_bytes(dsp, dsp->dsa_drr,
			    sizeof (dmu_replay_record_t)) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_bytes(dsp, dsp->dsa_drr,
		    sizeof (dmu_replay_record_t)) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));

	if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
		return (SET_ERROR(EINTR));

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

#define	BP_SPAN(dnp, level) \
	(((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
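/*
 * Example (assuming typical on-disk values): with 128K data blocks
 * (dn_datablkszsec = 256) and 16K indirect blocks (dn_indblkshift = 14),
 * each indirect block holds 1 << (14 - SPA_BLKPTRSHIFT) = 128 block
 * pointers, so BP_SPAN() is 128K at level 0, 16M at level 1, 2G at
 * level 2, and so on.
 */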

/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (SET_ERROR(EINTR));

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (zb->zb_level == ZB_ZIL_LEVEL) {
		/*
		 * If we are sending a non-snapshot (which is allowed on
		 * read-only pools), it may have a ZIL, which must be ignored.
		 */
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		err = dump_free(dsp, zb->zb_object, zb->zb_blkid * span, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		dnode_phys_t *blk;
		int i;
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		blk = abuf->b_data;
		for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
			uint64_t dnobj = (zb->zb_blkid <<
			    (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
			err = dump_dnode(dsp, dnobj, blk+i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsp, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
		err = dump_write_embedded(dsp, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else { /* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);
		uint64_t offset;

		ASSERT3U(blksz, ==, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
		ASSERT0(zb->zb_level);
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10c;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

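		/*
		 * Illustrative note: when the receiver has not enabled
		 * large-block support, a single 1M block is re-sent below
		 * as eight 128K DRR_WRITE records; bp is passed as NULL
		 * for these partial writes, so no precomputed dedup
		 * checksum is available for them.
		 */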
		if (!(dsp->dsa_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
		    blksz > SPA_OLD_MAXBLOCKSIZE) {
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsp, type, zb->zb_object,
				    offset, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsp, type, zb->zb_object,
			    offset, blksz, bp, abuf->b_data);
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Releases dp using the specified tag.
 */
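/*
 * Stream layout, as implemented below: a DRR_BEGIN record describing the
 * destination, followed by the records emitted from backup_cb() during the
 * dataset traversal, followed by a DRR_END record carrying the running
 * fletcher-4 checksum that dump_bytes() accumulates over the whole stream.
 */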
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
    zfs_bookmark_phys_t *fromzb, boolean_t is_clone, boolean_t embedok,
#ifdef illumos
    boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
#else
    boolean_t large_block_ok, int outfd, struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;

	err = dmu_objset_from_ds(ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && ds->ds_large_blocks)
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	} else {
		embedok = B_FALSE;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(ds)->ds_guid;
	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (fromzb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid = fromzb->zbm_guid;
		fromtxg = fromzb->zbm_creation_txg;
	}
	dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
	if (!dsl_dataset_is_snapshot(ds)) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(ds)->ds_guid;
	ZIO_SET_CHECKSUM(&dsp->dsa_zc, 0, 0, 0, 0);
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_incremental = (fromzb != NULL);
	dsp->dsa_featureflags = featureflags;

	mutex_enter(&ds->ds_sendstream_lock);
	list_insert_head(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	dsl_dataset_long_hold(ds, FTAG);
	dsl_pool_rele(dp, tag);

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
	    backup_cb, dsp);

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

out:
	mutex_enter(&ds->ds_sendstream_lock);
	list_remove(&ds->ds_sendstreams, dsp);
	mutex_exit(&ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(ds, FTAG);

	return (err);
}

int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dmu_send(const char *tosnap, const char *fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}

int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation cancel out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
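	/*
	 * Worked example (illustrative): for 1 GiB of changed data at the
	 * default 128K recordsize there are 8192 blocks, so the indirect
	 * overhead subtracted below is 8192 * sizeof (blkptr_t) = 1 MiB,
	 * and roughly one dmu_replay_record_t of stream header is added
	 * back per block.
	 */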
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!dsl_dataset_is_snapshot(origin)) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags;

	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
	    DS_FLAG_CI_DATASET : 0;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
	    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !newds->ds_large_blocks) {
		dsl_dataset_activate_large_blocks_sync_impl(dsobj, tx);
		newds->ds_large_blocks = B_TRUE;
	}

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	dmu_replay_record_t *drr;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_cred = CRED();

	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		drc->drc_byteswap = B_TRUE;
	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
		return (SET_ERROR(EINVAL));

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin = *drc->drc_drrb;
	if (drc->drc_byteswap) {
		fletcher_4_incremental_byteswap(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		fletcher_4_incremental_native(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	}
	kmem_free(drr, sizeof (dmu_replay_record_t));

	if (drc->drc_byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
}

struct restorearg {
	int err;
	boolean_t byteswap;
	kthread_t *td;
	struct file *fp;
	char *buf;
	uint64_t voff;
	int bufsize; /* amount of memory allocated for buf */
	zio_cksum_t cksum;
	avl_tree_t *guid_to_ds_map;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}

static int
restore_bytes(struct restorearg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}

static void *
restore_read(struct restorearg *ra, int len, char *buf)
{
	int done = 0;

	if (buf == NULL)
		buf = ra->buf;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT0(len % 8);
	ASSERT3U(len, <=, ra->bufsize);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done)
			ra->err = SET_ERROR(EINVAL);
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (NULL);
	}

	ASSERT3U(done, ==, len);
	if (ra->byteswap)
		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
	else
		fletcher_4_incremental_native(buf, len, &ra->cksum);
	return (buf);
}

static void
backup_byteswap(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
		DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_checksum.zc_word[0]);
		DO64(drr_end.drr_checksum.zc_word[1]);
		DO64(drr_end.drr_checksum.zc_word[2]);
		DO64(drr_end.drr_checksum.zc_word[3]);
		DO64(drr_end.drr_toguid);
		break;
	}
#undef DO64
#undef DO32
}

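/*
 * A dnode's bonus buffer and its block pointer array share space in the
 * dnode, so a larger bonus leaves room for fewer blkptrs.  Example
 * (assuming DN_MAX_BONUSLEN = 320 and sizeof (blkptr_t) = 128): a 64-byte
 * bonus leaves 1 + ((320 - 64) >> SPA_BLKPTRSHIFT) = 3 block pointers.
 * A DMU_OT_SA bonus always implies a single blkptr, since an SA bonus
 * may grow and overflow into a spill block.
 */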
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
	}
}

static int
restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
{
	dmu_object_info_t doi;
	dmu_tx_t *tx;
	void *data = NULL;
	uint64_t object;
	int err;

	if (drro->drr_type == DMU_OT_NONE ||
	    !DMU_OT_IS_VALID(drro->drr_type) ||
	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(os)) ||
	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
		return (SET_ERROR(EINVAL));
	}

	err = dmu_object_info(os, drro->drr_object, &doi);

	if (err != 0 && err != ENOENT)
		return (SET_ERROR(EINVAL));
	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;

	if (drro->drr_bonuslen) {
		data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8), NULL);
		if (ra->err != 0)
			return (ra->err);
	}

	/*
	 * If we are losing blkptrs or changing the block size this must
	 * be a new file instance.  We must clear out the previous file
	 * contents before we can change this type of metadata in the dnode.
	 */
	if (err == 0) {
		int nblkptr;

		nblkptr = deduce_nblkptr(drro->drr_bonustype,
		    drro->drr_bonuslen);

		if (drro->drr_blksz != doi.doi_data_block_size ||
		    nblkptr < doi.doi_nblkptr) {
			err = dmu_free_long_range(os, drro->drr_object,
			    0, DMU_OBJECT_END);
			if (err != 0)
				return (SET_ERROR(EINVAL));
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}

	if (object == DMU_NEW_OBJECT) {
		/* currently free, want to be allocated */
		err = dmu_object_claim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	} else if (drro->drr_type != doi.doi_type ||
	    drro->drr_blksz != doi.doi_data_block_size ||
	    drro->drr_bonustype != doi.doi_bonus_type ||
	    drro->drr_bonuslen != doi.doi_bonus_size) {
		/* currently allocated, but with different properties */
		err = dmu_object_reclaim(os, drro->drr_object,
		    drro->drr_type, drro->drr_blksz,
		    drro->drr_bonustype, drro->drr_bonuslen, tx);
	}
	if (err != 0) {
		dmu_tx_commit(tx);
		return (SET_ERROR(EINVAL));
	}

	dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
	    tx);
	dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);

	if (data != NULL) {
		dmu_buf_t *db;

		VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
		dmu_buf_will_dirty(db, tx);

		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
		bcopy(data, db->db_data, drro->drr_bonuslen);
		if (ra->byteswap) {
			dmu_object_byteswap_t byteswap =
			    DMU_OT_BYTESWAP(drro->drr_bonustype);
			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
			    drro->drr_bonuslen);
		}
		dmu_buf_rele(db, FTAG);
	}
	dmu_tx_commit(tx);
	return (0);
}

/* ARGSUSED */
static int
restore_freeobjects(struct restorearg *ra, objset_t *os,
    struct drr_freeobjects *drrfo)
{
	uint64_t obj;

	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
		return (SET_ERROR(EINVAL));

	for (obj = drrfo->drr_firstobj;
	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
	    (void) dmu_object_next(os, &obj, FALSE, 0)) {
		int err;

		if (dmu_object_info(os, obj, NULL) != 0)
			continue;

		err = dmu_free_long_object(os, obj);
		if (err != 0)
			return (err);
	}
	return (0);
}

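/*
 * Writes are restored through a loaned ARC buffer (dmu_request_arcbuf()
 * followed by dmu_assign_arcbuf()) so the payload read from the stream
 * can be handed to the DMU without an extra copy.
 */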
1583static int
1584restore_write(struct restorearg *ra, objset_t *os,
1585    struct drr_write *drrw)
1586{
1587	dmu_tx_t *tx;
1588	void *data;
1589	int err;
1590
1591	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1592	    !DMU_OT_IS_VALID(drrw->drr_type))
1593		return (SET_ERROR(EINVAL));
1594
1595	if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
1596		return (SET_ERROR(EINVAL));
1597
1598	dmu_buf_t *bonus;
1599	if (dmu_bonus_hold(os, drrw->drr_object, FTAG, &bonus) != 0)
1600		return (SET_ERROR(EINVAL));
1601
1602	arc_buf_t *abuf = dmu_request_arcbuf(bonus, drrw->drr_length);
1603
1604	data = restore_read(ra, drrw->drr_length, abuf->b_data);
1605	if (data == NULL) {
1606		dmu_return_arcbuf(abuf);
1607		dmu_buf_rele(bonus, FTAG);
1608		return (ra->err);
1609	}
1610
1611	tx = dmu_tx_create(os);
1612
1613	dmu_tx_hold_write(tx, drrw->drr_object,
1614	    drrw->drr_offset, drrw->drr_length);
1615	err = dmu_tx_assign(tx, TXG_WAIT);
1616	if (err != 0) {
1617		dmu_return_arcbuf(abuf);
1618		dmu_buf_rele(bonus, FTAG);
1619		dmu_tx_abort(tx);
1620		return (err);
1621	}
1622	if (ra->byteswap) {
1623		dmu_object_byteswap_t byteswap =
1624		    DMU_OT_BYTESWAP(drrw->drr_type);
1625		dmu_ot_byteswap[byteswap].ob_func(data, drrw->drr_length);
1626	}
1627	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
1628	dmu_tx_commit(tx);
1629	dmu_buf_rele(bonus, FTAG);
1630	return (0);
1631}
1632
1633/*
1634 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
1635 * streams to refer to a copy of the data that is already on the
1636 * system because it came in earlier in the stream.  This function
1637 * finds the earlier copy of the data, and uses that copy instead of
1638 * data from the stream to fulfill this write.
1639 */
1640static int
1641restore_write_byref(struct restorearg *ra, objset_t *os,
1642    struct drr_write_byref *drrwbr)
1643{
1644	dmu_tx_t *tx;
1645	int err;
1646	guid_map_entry_t gmesrch;
1647	guid_map_entry_t *gmep;
1648	avl_index_t where;
1649	objset_t *ref_os = NULL;
1650	dmu_buf_t *dbp;
1651
1652	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1653		return (SET_ERROR(EINVAL));
1654
1655	/*
1656	 * If the GUID of the referenced dataset is different from the
1657	 * GUID of the target dataset, find the referenced dataset.
1658	 */
1659	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1660		gmesrch.guid = drrwbr->drr_refguid;
1661		if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
1662		    &where)) == NULL) {
1663			return (SET_ERROR(EINVAL));
1664		}
1665		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1666			return (SET_ERROR(EINVAL));
1667	} else {
1668		ref_os = os;
1669	}
1670
1671	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1672	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
1673	if (err != 0)
1674		return (err);
1675
1676	tx = dmu_tx_create(os);
1677
1678	dmu_tx_hold_write(tx, drrwbr->drr_object,
1679	    drrwbr->drr_offset, drrwbr->drr_length);
1680	err = dmu_tx_assign(tx, TXG_WAIT);
1681	if (err != 0) {
1682		dmu_tx_abort(tx);
1683		return (err);
1684	}
1685	dmu_write(os, drrwbr->drr_object,
1686	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1687	dmu_buf_rele(dbp, FTAG);
1688	dmu_tx_commit(tx);
1689	return (0);
1690}
1691
1692static int
1693restore_write_embedded(struct restorearg *ra, objset_t *os,
1694    struct drr_write_embedded *drrwnp)
1695{
1696	dmu_tx_t *tx;
1697	int err;
1698	void *data;
1699
1700	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
1701		return (EINVAL);
1702
1703	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
1704		return (EINVAL);
1705
1706	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
1707		return (EINVAL);
1708	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
1709		return (EINVAL);
1710
1711	data = restore_read(ra, P2ROUNDUP(drrwnp->drr_psize, 8), NULL);
1712	if (data == NULL)
1713		return (ra->err);
1714
1715	tx = dmu_tx_create(os);
1716
1717	dmu_tx_hold_write(tx, drrwnp->drr_object,
1718	    drrwnp->drr_offset, drrwnp->drr_length);
1719	err = dmu_tx_assign(tx, TXG_WAIT);
1720	if (err != 0) {
1721		dmu_tx_abort(tx);
1722		return (err);
1723	}
1724
1725	dmu_write_embedded(os, drrwnp->drr_object,
1726	    drrwnp->drr_offset, data, drrwnp->drr_etype,
1727	    drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
1728	    ra->byteswap ^ ZFS_HOST_BYTEORDER, tx);
1729
1730	dmu_tx_commit(tx);
1731	return (0);
1732}
1733
1734static int
1735restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
1736{
1737	dmu_tx_t *tx;
1738	void *data;
1739	dmu_buf_t *db, *db_spill;
1740	int err;
1741
1742	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1743	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(os)))
1744		return (SET_ERROR(EINVAL));
1745
1746	data = restore_read(ra, drrs->drr_length, NULL);
1747	if (data == NULL)
1748		return (ra->err);
1749
1750	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
1751		return (SET_ERROR(EINVAL));
1752
1753	VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
1754	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1755		dmu_buf_rele(db, FTAG);
1756		return (err);
1757	}
1758
1759	tx = dmu_tx_create(os);
1760
1761	dmu_tx_hold_spill(tx, db->db_object);
1762
1763	err = dmu_tx_assign(tx, TXG_WAIT);
1764	if (err != 0) {
1765		dmu_buf_rele(db, FTAG);
1766		dmu_buf_rele(db_spill, FTAG);
1767		dmu_tx_abort(tx);
1768		return (err);
1769	}
1770	dmu_buf_will_dirty(db_spill, tx);
1771
1772	if (db_spill->db_size < drrs->drr_length)
1773		VERIFY(0 == dbuf_spill_set_blksz(db_spill,
1774		    drrs->drr_length, tx));
1775	bcopy(data, db_spill->db_data, drrs->drr_length);
1776
1777	dmu_buf_rele(db, FTAG);
1778	dmu_buf_rele(db_spill, FTAG);
1779
1780	dmu_tx_commit(tx);
1781	return (0);
1782}
1783
1784/* ARGSUSED */
1785static int
1786restore_free(struct restorearg *ra, objset_t *os,
1787    struct drr_free *drrf)
1788{
1789	int err;
1790
	if (drrf->drr_length != -1ULL &&
	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
		return (SET_ERROR(EINVAL));

	if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
		return (SET_ERROR(EINVAL));

	err = dmu_free_long_range(os, drrf->drr_object,
	    drrf->drr_offset, drrf->drr_length);
	return (err);
}

/* used to destroy the drc_ds on error */
static void
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
	char name[MAXNAMELEN];
	dsl_dataset_name(drc->drc_ds, name);
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	(void) dsl_destroy_head(name);
}

/*
 * NB: callers *must* call dmu_recv_end() if this succeeds.
 */
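/*
 * A typical caller sequence, as a sketch (dmu_recv_begin() arguments and
 * error handling elided):
 *
 *	dmu_recv_begin(..., &drc);
 *	err = dmu_recv_stream(&drc, fp, &voff, cleanup_fd, &action_handle);
 *	if (err == 0)
 *		err = dmu_recv_end(&drc, owner);
 */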
int
dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
    int cleanup_fd, uint64_t *action_handlep)
{
	struct restorearg ra = { 0 };
	dmu_replay_record_t *drr;
	objset_t *os;
	zio_cksum_t pcksum;
	int featureflags;

	ra.byteswap = drc->drc_byteswap;
	ra.cksum = drc->drc_cksum;
	ra.td = curthread;
	ra.fp = fp;
	ra.voff = *voffp;
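	/* Size the record buffer to fit the largest possible payload. */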
	ra.bufsize = SPA_MAXBLOCKSIZE;
	ra.buf = kmem_alloc(ra.bufsize, KM_SLEEP);

	/* these were verified in dmu_recv_begin */
	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
	    DMU_SUBSTREAM);
	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);

	/*
	 * Open the objset we are modifying.
	 */
	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &os));

	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);

	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);

	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
		minor_t minor;

		if (cleanup_fd == -1) {
			ra.err = SET_ERROR(EBADF);
			goto out;
		}
		ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
		if (ra.err != 0) {
			cleanup_fd = -1;
			goto out;
		}

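		/*
		 * The first stream received on this cleanup_fd creates the
		 * guid map and registers an on-exit callback to tear it
		 * down; later streams look the map up through the action
		 * handle, so one map spans the whole receive.
		 */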
		if (*action_handlep == 0) {
			ra.guid_to_ds_map =
			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
			avl_create(ra.guid_to_ds_map, guid_compare,
			    sizeof (guid_map_entry_t),
			    offsetof(guid_map_entry_t, avlnode));
			ra.err = zfs_onexit_add_cb(minor,
			    free_guid_map_onexit, ra.guid_to_ds_map,
			    action_handlep);
			if (ra.err != 0)
				goto out;
		} else {
			ra.err = zfs_onexit_cb_data(minor, *action_handlep,
			    (void **)&ra.guid_to_ds_map);
			if (ra.err != 0)
				goto out;
		}

		drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
	}

	/*
	 * Read records and process them.
	 */
	pcksum = ra.cksum;
	while (ra.err == 0 &&
	    NULL != (drr = restore_read(&ra, sizeof (*drr), NULL))) {
		if (issig(JUSTLOOKING) && issig(FORREAL)) {
			ra.err = SET_ERROR(EINTR);
			goto out;
		}

		if (ra.byteswap)
			backup_byteswap(drr);

		switch (drr->drr_type) {
		case DRR_OBJECT:
		{
			/*
			 * We need to make a copy of the record header,
			 * because restore_{object,write} may need to
			 * restore_read(), which will invalidate drr.
			 */
			struct drr_object drro = drr->drr_u.drr_object;
			ra.err = restore_object(&ra, os, &drro);
			break;
		}
		case DRR_FREEOBJECTS:
		{
			struct drr_freeobjects drrfo =
			    drr->drr_u.drr_freeobjects;
			ra.err = restore_freeobjects(&ra, os, &drrfo);
			break;
		}
		case DRR_WRITE:
		{
			struct drr_write drrw = drr->drr_u.drr_write;
			ra.err = restore_write(&ra, os, &drrw);
			break;
		}
		case DRR_WRITE_BYREF:
		{
			struct drr_write_byref drrwbr =
			    drr->drr_u.drr_write_byref;
			ra.err = restore_write_byref(&ra, os, &drrwbr);
			break;
		}
		case DRR_WRITE_EMBEDDED:
		{
			struct drr_write_embedded drrwe =
			    drr->drr_u.drr_write_embedded;
			ra.err = restore_write_embedded(&ra, os, &drrwe);
			break;
		}
		case DRR_FREE:
		{
			struct drr_free drrf = drr->drr_u.drr_free;
			ra.err = restore_free(&ra, os, &drrf);
			break;
		}
		case DRR_END:
		{
			struct drr_end drre = drr->drr_u.drr_end;
			/*
			 * We compare against the *previous* checksum
			 * value, because the stored checksum is of
			 * everything before the DRR_END record.
			 */
			if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
				ra.err = SET_ERROR(ECKSUM);
			goto out;
		}
		case DRR_SPILL:
		{
			struct drr_spill drrs = drr->drr_u.drr_spill;
			ra.err = restore_spill(&ra, os, &drrs);
			break;
		}
		default:
			ra.err = SET_ERROR(EINVAL);
			goto out;
		}
		pcksum = ra.cksum;
	}
	ASSERT(ra.err != 0);

out:
	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
		zfs_onexit_fd_rele(cleanup_fd);

	if (ra.err != 0) {
		/*
		 * Destroy what we created, so we don't leave it in the
		 * inconsistent "restoring" state.
		 */
		dmu_recv_cleanup_ds(drc);
	}

	kmem_free(ra.buf, ra.bufsize);
	*voffp = ra.voff;
	return (ra.err);
}

static int
dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	int error;

	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
		if (error != 0)
			return (error);
		if (drc->drc_force) {
			/*
			 * We will destroy any snapshots in tofs (i.e. before
			 * origin_head) that are after the origin (which is
			 * the snap before drc_ds, because drc_ds cannot
			 * have any snaps of its own).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				error = dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap);
				if (error != 0)
					break;
				if (snap->ds_dir != origin_head->ds_dir)
					error = SET_ERROR(EINVAL);
				if (error == 0) {
					error = dsl_destroy_snapshot_check_impl(
					    snap, B_FALSE);
				}
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_dataset_rele(snap, FTAG);
				if (error != 0)
					break;
			}
			if (error != 0) {
				dsl_dataset_rele(origin_head, FTAG);
				return (error);
			}
		}
		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
		    origin_head, drc->drc_force, drc->drc_owner, tx);
		if (error != 0) {
			dsl_dataset_rele(origin_head, FTAG);
			return (error);
		}
		error = dsl_dataset_snapshot_check_impl(origin_head,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
		dsl_dataset_rele(origin_head, FTAG);
		if (error != 0)
			return (error);

		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
	} else {
		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
	}
	return (error);
}

static void
dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_cookie_t *drc = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);

	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
	    tx, "snap=%s", drc->drc_tosnap);

	if (!drc->drc_newfs) {
		dsl_dataset_t *origin_head;

		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
		    &origin_head));

		if (drc->drc_force) {
			/*
			 * Destroy any snapshots of drc_tofs (origin_head)
			 * after the origin (the snap before drc_ds).
			 */
			uint64_t obj;

			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
			while (obj !=
			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
				dsl_dataset_t *snap;
				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
				    &snap));
				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
				dsl_destroy_snapshot_sync_impl(snap,
				    B_FALSE, tx);
				dsl_dataset_rele(snap, FTAG);
			}
		}
		VERIFY3P(drc->drc_ds->ds_prev, ==,
		    origin_head->ds_prev);

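		/*
		 * Swap the received clone's contents into origin_head,
		 * snapshot the result, and (below) destroy the now-empty
		 * clone head.
		 */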
		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
		    origin_head, tx);
		dsl_dataset_snapshot_sync_impl(origin_head,
		    drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
		dsl_dataset_phys(origin_head)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dsl_dataset_rele(origin_head, FTAG);
		dsl_destroy_head_sync_impl(drc->drc_ds, tx);

		if (drc->drc_owner != NULL)
			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
	} else {
		dsl_dataset_t *ds = drc->drc_ds;

		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);

		/* set snapshot's creation time and guid */
		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
		    drc->drc_drrb->drr_creation_time;
		dsl_dataset_phys(ds->ds_prev)->ds_guid =
		    drc->drc_drrb->drr_toguid;
		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
		    ~DS_FLAG_INCONSISTENT;

		dmu_buf_will_dirty(ds->ds_dbuf, tx);
		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
	}
	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
	/*
	 * Release the hold from dmu_recv_begin.  This must be done before
	 * we return to open context, so that when we free the dataset's dnode,
	 * we can evict its bonus buffer.
	 */
	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
	drc->drc_ds = NULL;
}

static int
add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
	dsl_pool_t *dp;
	dsl_dataset_t *snapds;
	guid_map_entry_t *gmep;
	int err;

	ASSERT(guid_map != NULL);

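	/*
	 * gmep serves as the hold tag for both the dataset hold and the
	 * long hold below, so the teardown code that frees the guid map
	 * can release them with the same tag.
	 */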
	err = dsl_pool_hold(name, FTAG, &dp);
	if (err != 0)
		return (err);
	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
	if (err == 0) {
		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
		gmep->gme_ds = snapds;
		avl_add(guid_map, gmep);
		dsl_dataset_long_hold(snapds, gmep);
	} else
		kmem_free(gmep, sizeof (*gmep));

	dsl_pool_rele(dp, FTAG);
	return (err);
}

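/*
 * Rough count of blocks dirtied by dmu_recv_end_sync(), passed to
 * dsl_sync_task() for its space check.
 */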
static int dmu_recv_end_modified_blocks = 3;

static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
	int error;
	char name[MAXNAMELEN];

#ifdef _KERNEL
	/*
	 * We will be destroying the ds; make sure its origin is unmounted if
	 * necessary.
	 */
	dsl_dataset_name(drc->drc_ds, name);
	zfs_destroy_unmount_origin(name);
#endif

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0)
		dmu_recv_cleanup_ds(drc);
	return (error);
}

static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
	int error;

	error = dsl_sync_task(drc->drc_tofs,
	    dmu_recv_end_check, dmu_recv_end_sync, drc,
	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);

	if (error != 0) {
		dmu_recv_cleanup_ds(drc);
	} else if (drc->drc_guid_to_ds_map != NULL) {
		(void) add_ds_to_guidmap(drc->drc_tofs,
		    drc->drc_guid_to_ds_map,
		    drc->drc_newsnapobj);
	}
	return (error);
}

int
dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
	drc->drc_owner = owner;

	if (drc->drc_newfs)
		return (dmu_recv_new_end(drc));
	else
		return (dmu_recv_existing_end(drc));
}

/*
 * Return TRUE if this objset is currently being received into.
 */
boolean_t
dmu_objset_is_receiving(objset_t *os)
{
	return (os->os_dsl_dataset != NULL &&
	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}