dmu_send.c revision 288572
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
 * Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>

#ifdef __FreeBSD__
#undef dump_write
#define dump_write dmu_dump_write
#endif

/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
int zfs_send_queue_length = 16 * 1024 * 1024;
int zfs_recv_queue_length = 16 * 1024 * 1024;

static char *dmu_recv_tag = "dmu_recv_tag";
static const char *recv_clone_name = "%recv";

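/*
 * BP_SPAN computes the number of bytes of file data covered by a block
 * pointer at the given indirection level: the data block size
 * (datablkszsec is in units of 512-byte sectors) multiplied by the
 * fan-out of each indirect level (an indirect block holds
 * 2^(indblkshift - SPA_BLKPTRSHIFT) block pointers).  For example, with
 * 128K data blocks (datablkszsec = 256) and 128K indirect blocks
 * (indblkshift = 17), a level-1 block pointer spans 128K * 1024 = 128M.
 */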
#define	BP_SPAN(datablkszsec, indblkshift, level) \
	(((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
	(level) * (indblkshift - SPA_BLKPTRSHIFT)))

struct send_thread_arg {
	bqueue_t	q;
	dsl_dataset_t	*ds;		/* Dataset to traverse */
	uint64_t	fromtxg;	/* Traverse from this txg */
	int		flags;		/* flags to pass to traverse_dataset */
	int		error_code;
	boolean_t	cancel;
};

struct send_block_record {
	boolean_t		eos_marker; /* Marks the end of the stream */
	blkptr_t		bp;
	zbookmark_phys_t	zb;
	uint8_t			indblkshift;
	uint16_t		datablkszsec;
	bqueue_node_t		ln;
};

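/*
 * Write "len" bytes from "buf" to the output file and account for them
 * in the stream offset.  "len" must be a multiple of 8, since records in
 * the stream assume 8-byte alignment.  Any write error is latched in
 * dsa_err so callers further up the stack can distinguish a failed write
 * from an interrupted send.
 */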
static int
dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
{
	dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
	struct uio auio;
	struct iovec aiov;
	ASSERT0(len % 8);

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = (off_t)-1;
	auio.uio_td = dsp->dsa_td;
#ifdef _KERNEL
	if (dsp->dsa_fp->f_type == DTYPE_VNODE)
		bwillwrite();
	dsp->dsa_err = fo_write(dsp->dsa_fp, &auio, dsp->dsa_td->td_ucred, 0,
	    dsp->dsa_td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	dsp->dsa_err = EOPNOTSUPP;
#endif
	mutex_enter(&ds->ds_sendstream_lock);
	*dsp->dsa_off += len;
	mutex_exit(&ds->ds_sendstream_lock);

	return (dsp->dsa_err);
}

/*
 * For all record types except BEGIN, fill in the checksum (overlaid in
 * drr_u.drr_checksum.drr_checksum).  The checksum verifies everything
 * up to the start of the checksum itself.
 */
static int
dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
{
	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
	fletcher_4_incremental_native(dsp->dsa_drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    &dsp->dsa_zc);
	if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
		ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
		    drr_checksum.drr_checksum));
		dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
	}
	fletcher_4_incremental_native(&dsp->dsa_drr->
	    drr_u.drr_checksum.drr_checksum,
	    sizeof (zio_cksum_t), &dsp->dsa_zc);
	if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
		return (SET_ERROR(EINTR));
	if (payload_len != 0) {
		fletcher_4_incremental_native(payload, payload_len,
		    &dsp->dsa_zc);
		if (dump_bytes(dsp, payload, payload_len) != 0)
			return (SET_ERROR(EINTR));
	}
	return (0);
}

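/*
 * Emit a DRR_FREE record for the given range, aggregating it with a
 * pending DRR_FREE record when the new range directly extends it.  The
 * record is left pending so later adjacent frees can be merged in; a
 * length of -1ULL (free to end of object) is pushed out immediately.
 */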
static int
dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    uint64_t length)
{
	struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);

	/*
	 * When we receive a free record, dbuf_free_range() assumes
	 * that the receiving system doesn't have any dbufs in the range
	 * being freed.  This is always true because there is a one-record
	 * constraint: we only send one WRITE record for any given
	 * object+offset.  We know that the one-record constraint is
	 * true because we always send data in increasing order by
	 * object,offset.
	 *
	 * If the increasing-order constraint ever changes, we should find
	 * another way to assert that the one-record constraint is still
	 * satisfied.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));

	/*
	 * If we are doing a non-incremental send, then there can't
	 * be any data in the dataset we're receiving into.  Therefore
	 * a free record would simply be a no-op.  Save space by not
	 * sending it to begin with.
	 */
	if (!dsp->dsa_incremental)
		return (0);

	if (length != -1ULL && offset + length < offset)
		length = -1ULL;

	/*
	 * If there is a pending op, but it's not PENDING_FREE, push it out,
	 * since free block aggregation can only be done for blocks of the
	 * same type (i.e., DRR_FREE records can only be aggregated with
	 * other DRR_FREE records; DRR_FREEOBJECTS records can only be
	 * aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	if (dsp->dsa_pending_op == PENDING_FREE) {
		/*
		 * There should never be a PENDING_FREE if length is -1
		 * (because dump_dnode is the only place where this
		 * function is called with a -1, and only after flushing
		 * any pending record).
		 */
		ASSERT(length != -1ULL);
		/*
		 * Check to see whether this free block can be aggregated
		 * with the pending one.
		 */
		if (drrf->drr_object == object && drrf->drr_offset +
		    drrf->drr_length == offset) {
			drrf->drr_length += length;
			return (0);
		} else {
			/* not a continuation.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}
	/* create a FREE record and make it pending */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREE;
	drrf->drr_object = object;
	drrf->drr_offset = offset;
	drrf->drr_length = length;
	drrf->drr_toguid = dsp->dsa_toguid;
	if (length == -1ULL) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
	} else {
		dsp->dsa_pending_op = PENDING_FREE;
	}

	return (0);
}

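/*
 * Emit a DRR_WRITE record and its payload.  If the block pointer carries
 * a dedup-capable checksum, copy it into drr_key so that a dedup-aware
 * consumer of the stream can refer back to this block with
 * DRR_WRITE_BYREF records.
 */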
static int
dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
    uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
	struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);

	/*
	 * We send data in increasing object, offset order.
	 * See comment in dump_free() for details.
	 */
	ASSERT(object > dsp->dsa_last_data_object ||
	    (object == dsp->dsa_last_data_object &&
	    offset > dsp->dsa_last_data_offset));
	dsp->dsa_last_data_object = object;
	dsp->dsa_last_data_offset = offset + blksz - 1;

	/*
	 * If there is any kind of pending aggregation (currently either
	 * a grouping of free objects or free blocks), push it out to
	 * the stream, since aggregation can't be done across operations
	 * of different types.
	 */
	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	/* write a WRITE record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE;
	drrw->drr_object = object;
	drrw->drr_type = type;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	if (bp == NULL || BP_IS_EMBEDDED(bp)) {
		/*
		 * There's no pre-computed checksum for partial-block
		 * writes or embedded BP's, so (like
		 * fletcher4-checksummed blocks) userland will have to
		 * compute a dedup-capable checksum itself.
		 */
		drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
	} else {
		drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
		if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
			drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
		DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
		DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
		DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
		drrw->drr_key.ddk_cksum = bp->blk_cksum;
	}

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

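/*
 * Emit a DRR_WRITE_EMBEDDED record for a block whose (compressed)
 * payload is small enough to be stored entirely inside its block pointer.
 */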
static int
dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
    int blksz, const blkptr_t *bp)
{
	char buf[BPE_PAYLOAD_SIZE];
	struct drr_write_embedded *drrw =
	    &(dsp->dsa_drr->drr_u.drr_write_embedded);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (EINTR);
		dsp->dsa_pending_op = PENDING_NONE;
	}

	ASSERT(BP_IS_EMBEDDED(bp));

	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
	drrw->drr_object = object;
	drrw->drr_offset = offset;
	drrw->drr_length = blksz;
	drrw->drr_toguid = dsp->dsa_toguid;
	drrw->drr_compression = BP_GET_COMPRESS(bp);
	drrw->drr_etype = BPE_GET_ETYPE(bp);
	drrw->drr_lsize = BPE_GET_LSIZE(bp);
	drrw->drr_psize = BPE_GET_PSIZE(bp);

	decode_embedded_bp_compressed(bp, buf);

	if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
		return (EINTR);
	return (0);
}

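/*
 * Emit a DRR_SPILL record carrying the contents of an object's spill
 * block (overflow attribute storage used by the SA framework).
 */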
static int
dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
{
	struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write a SPILL record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_SPILL;
	drrs->drr_object = object;
	drrs->drr_length = blksz;
	drrs->drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, data, blksz) != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

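/*
 * Emit a DRR_FREEOBJECTS record for "numobjs" objects starting at
 * "firstobj", merging it with a pending DRR_FREEOBJECTS record when the
 * new run immediately follows it.
 */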
static int
dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
	struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);

	/* See comment in dump_free(). */
	if (!dsp->dsa_incremental)
		return (0);

	/*
	 * If there is a pending op, but it's not PENDING_FREEOBJECTS,
	 * push it out, since free block aggregation can only be done for
	 * blocks of the same type (i.e., DRR_FREE records can only be
	 * aggregated with other DRR_FREE records; DRR_FREEOBJECTS records
	 * can only be aggregated with other DRR_FREEOBJECTS records).
	 */
	if (dsp->dsa_pending_op != PENDING_NONE &&
	    dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}
	if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
		/*
		 * See whether this free object array can be aggregated
		 * with the pending one.
		 */
		if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
			drrfo->drr_numobjs += numobjs;
			return (0);
		} else {
			/* can't be aggregated.  Push out pending record */
			if (dump_record(dsp, NULL, 0) != 0)
				return (SET_ERROR(EINTR));
			dsp->dsa_pending_op = PENDING_NONE;
		}
	}

	/* write a FREEOBJECTS record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
	drrfo->drr_firstobj = firstobj;
	drrfo->drr_numobjs = numobjs;
	drrfo->drr_toguid = dsp->dsa_toguid;

	dsp->dsa_pending_op = PENDING_FREEOBJECTS;

	return (0);
}

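/*
 * Emit a DRR_OBJECT record describing a dnode (type, block size, bonus
 * buffer), followed by its bonus payload, then free everything past the
 * end of the object in case it shrank since the "from" snapshot.
 */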
static int
dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
	struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);

	if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
		return (dump_freeobjects(dsp, object, 1));

	if (dsp->dsa_pending_op != PENDING_NONE) {
		if (dump_record(dsp, NULL, 0) != 0)
			return (SET_ERROR(EINTR));
		dsp->dsa_pending_op = PENDING_NONE;
	}

	/* write an OBJECT record */
	bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
	dsp->dsa_drr->drr_type = DRR_OBJECT;
	drro->drr_object = object;
	drro->drr_type = dnp->dn_type;
	drro->drr_bonustype = dnp->dn_bonustype;
	drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	drro->drr_bonuslen = dnp->dn_bonuslen;
	drro->drr_checksumtype = dnp->dn_checksum;
	drro->drr_compress = dnp->dn_compress;
	drro->drr_toguid = dsp->dsa_toguid;

	if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
		drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;

	if (dump_record(dsp, DN_BONUS(dnp),
	    P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
		return (SET_ERROR(EINTR));
	}

	/* Free anything past the end of the file. */
	if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
	    (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
		return (SET_ERROR(EINTR));
	if (dsp->dsa_err != 0)
		return (SET_ERROR(EINTR));
	return (0);
}

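/*
 * Decide whether a block can be sent as a DRR_WRITE_EMBEDDED record:
 * the BP must be embedded, and its compression function and embedded
 * type must be ones the receiver can handle (per the embedok/LZ4
 * feature flags negotiated in dmu_send_impl).
 */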
static boolean_t
backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
{
	if (!BP_IS_EMBEDDED(bp))
		return (B_FALSE);

	/*
	 * Compression function must be legacy, or explicitly enabled.
	 */
	if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
	    !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
		return (B_FALSE);

	/*
	 * Embed type must be explicitly enabled.
	 */
	switch (BPE_GET_ETYPE(bp)) {
	case BP_EMBEDDED_TYPE_DATA:
		if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
			return (B_TRUE);
		break;
	default:
		return (B_FALSE);
	}
	return (B_FALSE);
}

/*
 * This is the callback function to traverse_dataset that acts as the worker
 * thread for dmu_send_impl.
 */
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
	struct send_thread_arg *sta = arg;
	struct send_block_record *record;
	uint64_t record_size;
	int err = 0;

	if (sta->cancel)
		return (SET_ERROR(EINTR));

	if (bp == NULL) {
		ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
		return (0);
	} else if (zb->zb_level < 0) {
		return (0);
	}

	record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
	record->eos_marker = B_FALSE;
	record->bp = *bp;
	record->zb = *zb;
	record->indblkshift = dnp->dn_indblkshift;
	record->datablkszsec = dnp->dn_datablkszsec;
	record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	bqueue_enqueue(&sta->q, record, record_size);

	return (err);
}

/*
 * This function kicks off the traverse_dataset call.  It also handles
 * setting the error code of the thread in case something goes wrong, and
 * pushes the End of Stream record when the traverse_dataset call has
 * finished.  If there is no dataset to traverse, the thread immediately
 * pushes the End of Stream marker.
 */
static void
send_traverse_thread(void *arg)
{
	struct send_thread_arg *st_arg = arg;
	int err;
	struct send_block_record *data;

	if (st_arg->ds != NULL) {
		err = traverse_dataset(st_arg->ds, st_arg->fromtxg,
		    st_arg->flags, send_cb, arg);
		if (err != EINTR)
			st_arg->error_code = err;
	}
	data = kmem_zalloc(sizeof (*data), KM_SLEEP);
	data->eos_marker = B_TRUE;
	bqueue_enqueue(&st_arg->q, data, 1);
	thread_exit();
}

/*
 * This function actually handles figuring out what kind of record needs to be
 * dumped, reading the data (which has hopefully been prefetched), and calling
 * the appropriate helper function.
 */
static int
do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
	dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
	const blkptr_t *bp = &data->bp;
	const zbookmark_phys_t *zb = &data->zb;
	uint8_t indblkshift = data->indblkshift;
	uint16_t dblkszsec = data->datablkszsec;
	spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	ASSERT3U(zb->zb_level, >=, 0);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (BP_IS_HOLE(bp) &&
	    zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
	} else if (BP_IS_HOLE(bp)) {
		uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
		uint64_t offset = zb->zb_blkid * span;
		err = dump_free(dsa, zb->zb_object, offset, span);
	} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
		return (0);
	} else if (type == DMU_OT_DNODE) {
		int blksz = BP_GET_LSIZE(bp);
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;

		ASSERT0(zb->zb_level);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		dnode_phys_t *blk = abuf->b_data;
		uint64_t dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
		for (int i = 0; i < blksz >> DNODE_SHIFT; i++) {
			err = dump_dnode(dsa, dnobj + i, blk + i);
			if (err != 0)
				break;
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (type == DMU_OT_SA) {
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = BP_GET_LSIZE(bp);

		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0)
			return (SET_ERROR(EIO));

		err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
		(void) arc_buf_remove_ref(abuf, &abuf);
	} else if (backup_do_embed(dsa, bp)) {
		/* it's an embedded level-0 block of a regular object */
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		ASSERT0(zb->zb_level);
		err = dump_write_embedded(dsa, zb->zb_object,
		    zb->zb_blkid * blksz, blksz, bp);
	} else {
		/* it's a level-0 block of a regular object */
		arc_flags_t aflags = ARC_FLAG_WAIT;
		arc_buf_t *abuf;
		int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
		uint64_t offset;

		ASSERT0(zb->zb_level);
		if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
		    &aflags, zb) != 0) {
			if (zfs_send_corrupt_data) {
				/* Send a block filled with 0x"zfs badd bloc" */
				abuf = arc_buf_alloc(spa, blksz, &abuf,
				    ARC_BUFC_DATA);
				uint64_t *ptr;
				for (ptr = abuf->b_data;
				    (char *)ptr < (char *)abuf->b_data + blksz;
				    ptr++)
					*ptr = 0x2f5baddb10cULL;
			} else {
				return (SET_ERROR(EIO));
			}
		}

		offset = zb->zb_blkid * blksz;

		if (!(dsa->dsa_featureflags &
		    DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
		    blksz > SPA_OLD_MAXBLOCKSIZE) {
			char *buf = abuf->b_data;
			while (blksz > 0 && err == 0) {
				int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
				err = dump_write(dsa, type, zb->zb_object,
				    offset, n, NULL, buf);
				offset += n;
				buf += n;
				blksz -= n;
			}
		} else {
			err = dump_write(dsa, type, zb->zb_object,
			    offset, blksz, bp, abuf->b_data);
		}
		(void) arc_buf_remove_ref(abuf, &abuf);
	}

	ASSERT(err == 0 || err == EINTR);
	return (err);
}

/*
 * Pop the new data off the queue, and free the old data.
 */
static struct send_block_record *
get_next_record(bqueue_t *bq, struct send_block_record *data)
{
	struct send_block_record *tmp = bqueue_dequeue(bq);
	kmem_free(data, sizeof (*data));
	return (tmp);
}

/*
 * Actually do the bulk of the work in a zfs send.
 *
 * Note: Releases dp using the specified tag.
 */
static int
dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
    zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone, boolean_t embedok,
#ifdef illumos
    boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
#else
    boolean_t large_block_ok, int outfd, struct file *fp, offset_t *off)
#endif
{
	objset_t *os;
	dmu_replay_record_t *drr;
	dmu_sendarg_t *dsp;
	int err;
	uint64_t fromtxg = 0;
	uint64_t featureflags = 0;
	struct send_thread_arg to_arg;

	err = dmu_objset_from_ds(to_ds, &os);
	if (err != 0) {
		dsl_pool_rele(dp, tag);
		return (err);
	}

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
	DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
	    DMU_SUBSTREAM);

#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS) {
		uint64_t version;
		if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
			kmem_free(drr, sizeof (dmu_replay_record_t));
			dsl_pool_rele(dp, tag);
			return (SET_ERROR(EINVAL));
		}
		if (version >= ZPL_VERSION_SA) {
			featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
		}
	}
#endif

	if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
		featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
	if (embedok &&
	    spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
		featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
		if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
			featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
	}

	DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
	    featureflags);

	drr->drr_u.drr_begin.drr_creation_time =
	    dsl_dataset_phys(to_ds)->ds_creation_time;
	drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
	if (is_clone)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
	drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
		drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;

	if (ancestor_zb != NULL) {
		drr->drr_u.drr_begin.drr_fromguid =
		    ancestor_zb->zbm_guid;
		fromtxg = ancestor_zb->zbm_creation_txg;
	}
	dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
	if (!to_ds->ds_is_snapshot) {
		(void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
		    sizeof (drr->drr_u.drr_begin.drr_toname));
	}

	dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);

	dsp->dsa_drr = drr;
	dsp->dsa_outfd = outfd;
	dsp->dsa_proc = curproc;
	dsp->dsa_td = curthread;
	dsp->dsa_fp = fp;
	dsp->dsa_os = os;
	dsp->dsa_off = off;
	dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
	dsp->dsa_pending_op = PENDING_NONE;
	dsp->dsa_incremental = (ancestor_zb != NULL);
	dsp->dsa_featureflags = featureflags;

	mutex_enter(&to_ds->ds_sendstream_lock);
	list_insert_head(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	dsl_dataset_long_hold(to_ds, FTAG);
	dsl_pool_rele(dp, tag);

	if (dump_record(dsp, NULL, 0) != 0) {
		err = dsp->dsa_err;
		goto out;
	}

	err = bqueue_init(&to_arg.q, zfs_send_queue_length,
	    offsetof(struct send_block_record, ln));
	to_arg.error_code = 0;
	to_arg.cancel = B_FALSE;
	to_arg.ds = to_ds;
	to_arg.fromtxg = fromtxg;
	to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
	(void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
	    TS_RUN, minclsyspri);

	struct send_block_record *to_data;
	to_data = bqueue_dequeue(&to_arg.q);

	while (!to_data->eos_marker && err == 0) {
		err = do_dump(dsp, to_data);
		to_data = get_next_record(&to_arg.q, to_data);
		if (issig(JUSTLOOKING) && issig(FORREAL))
			err = EINTR;
	}

	if (err != 0) {
		to_arg.cancel = B_TRUE;
		while (!to_data->eos_marker) {
			to_data = get_next_record(&to_arg.q, to_data);
		}
	}
	kmem_free(to_data, sizeof (*to_data));

	bqueue_destroy(&to_arg.q);

	if (err == 0 && to_arg.error_code != 0)
		err = to_arg.error_code;

	if (err != 0)
		goto out;

	if (dsp->dsa_pending_op != PENDING_NONE)
		if (dump_record(dsp, NULL, 0) != 0)
			err = SET_ERROR(EINTR);

	if (err != 0) {
		if (err == EINTR && dsp->dsa_err != 0)
			err = dsp->dsa_err;
		goto out;
	}

	bzero(drr, sizeof (dmu_replay_record_t));
	drr->drr_type = DRR_END;
	drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
	drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;

	if (dump_record(dsp, NULL, 0) != 0)
		err = dsp->dsa_err;

out:
	mutex_enter(&to_ds->ds_sendstream_lock);
	list_remove(&to_ds->ds_sendstreams, dsp);
	mutex_exit(&to_ds->ds_sendstream_lock);

	kmem_free(drr, sizeof (dmu_replay_record_t));
	kmem_free(dsp, sizeof (dmu_sendarg_t));

	dsl_dataset_long_rele(to_ds, FTAG);

	return (err);
}

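/*
 * Variant of dmu_send() that identifies the source and target snapshots
 * by object number rather than by name.  Incremental sends from a
 * bookmark are not supported here; fromsnap must be a snapshot.
 */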
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	dsl_dataset_t *fromds = NULL;
	int err;

	err = dsl_pool_hold(pool, FTAG, &dp);
	if (err != 0)
		return (err);

	err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != 0) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone;

		err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		if (!dsl_dataset_is_before(ds, fromds, 0))
			err = SET_ERROR(EXDEV);
		zb.zbm_creation_time =
		    dsl_dataset_phys(fromds)->ds_creation_time;
		zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
		zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
		is_clone = (fromds->ds_dir != ds->ds_dir);
		dsl_dataset_rele(fromds, FTAG);
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, fp, off);
	}
	dsl_dataset_rele(ds, FTAG);
	return (err);
}

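/*
 * Name-based send entry point.  tosnap may be a snapshot, or (when the
 * pool is writeable) a filesystem or volume, in which case the dataset
 * is owned for the duration of the send to keep it from changing.  The
 * "from" side may be a snapshot ("@") or a bookmark ("#").
 */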
int
dmu_send(const char *tosnap, const char *fromsnap,
    boolean_t embedok, boolean_t large_block_ok,
#ifdef illumos
    int outfd, vnode_t *vp, offset_t *off)
#else
    int outfd, struct file *fp, offset_t *off)
#endif
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int err;
	boolean_t owned = B_FALSE;

	if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
		return (SET_ERROR(EINVAL));

	err = dsl_pool_hold(tosnap, FTAG, &dp);
	if (err != 0)
		return (err);

	if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
		/*
		 * We are sending a filesystem or volume.  Ensure
		 * that it doesn't change by owning the dataset.
		 */
		err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
		owned = B_TRUE;
	} else {
		err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
	}
	if (err != 0) {
		dsl_pool_rele(dp, FTAG);
		return (err);
	}

	if (fromsnap != NULL) {
		zfs_bookmark_phys_t zb;
		boolean_t is_clone = B_FALSE;
		int fsnamelen = strchr(tosnap, '@') - tosnap;

		/*
		 * If the fromsnap is in a different filesystem, then
		 * mark the send stream as a clone.
		 */
		if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
		    (fromsnap[fsnamelen] != '@' &&
		    fromsnap[fsnamelen] != '#')) {
			is_clone = B_TRUE;
		}

		if (strchr(fromsnap, '@')) {
			dsl_dataset_t *fromds;
			err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
			if (err == 0) {
				if (!dsl_dataset_is_before(ds, fromds, 0))
					err = SET_ERROR(EXDEV);
				zb.zbm_creation_time =
				    dsl_dataset_phys(fromds)->ds_creation_time;
				zb.zbm_creation_txg =
				    dsl_dataset_phys(fromds)->ds_creation_txg;
				zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
				is_clone = (ds->ds_dir != fromds->ds_dir);
				dsl_dataset_rele(fromds, FTAG);
			}
		} else {
			err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
		}
		if (err != 0) {
			dsl_dataset_rele(ds, FTAG);
			dsl_pool_rele(dp, FTAG);
			return (err);
		}
		err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
		    embedok, large_block_ok, outfd, fp, off);
	} else {
		err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
		    embedok, large_block_ok, outfd, fp, off);
	}
	if (owned)
		dsl_dataset_disown(ds, FTAG);
	else
		dsl_dataset_rele(ds, FTAG);
	return (err);
}

static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
    uint64_t *sizep)
{
	int err;
	/*
	 * Assume that space (both on-disk and in-stream) is dominated by
	 * data.  We will adjust for indirect blocks and the copies property,
	 * but ignore per-object space used (e.g., dnodes and DRR_OBJECT
	 * records).
	 */

	/*
	 * Subtract out approximate space used by indirect blocks.
	 * Assume most space is used by data blocks (non-indirect, non-dnode).
	 * Assume all blocks are recordsize.  Assume ditto blocks and
	 * internal fragmentation cancel out compression.
	 *
	 * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
	 * block, which we observe in practice.
	 */
	uint64_t recordsize;
	err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
	if (err != 0)
		return (err);
	size -= size / recordsize * sizeof (blkptr_t);

	/* Add in the space for the record associated with each block. */
	size += size / recordsize * sizeof (dmu_replay_record_t);

	*sizep = size;

	return (0);
}

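/*
 * Estimate the size of a full or incremental send stream to snapshot
 * "ds", based on the uncompressed size of the data that has changed,
 * adjusted for indirect blocks and per-record overhead.
 */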
int
dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!ds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/* fromsnap, if provided, must be a snapshot */
	if (fromds != NULL && !fromds->ds_is_snapshot)
		return (SET_ERROR(EINVAL));

	/*
	 * fromsnap must be an earlier snapshot from the same fs as tosnap,
	 * or the origin's fs.
	 */
	if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
		return (SET_ERROR(EXDEV));

	/* Get uncompressed size estimate of changed data. */
	if (fromds == NULL) {
		size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
	} else {
		uint64_t used, comp;
		err = dsl_dataset_space_written(fromds, ds,
		    &used, &comp, &size);
		if (err != 0)
			return (err);
	}

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}

/*
 * Simple callback used to traverse the blocks of a snapshot and sum their
 * uncompressed size.
 */
/* ARGSUSED */
static int
dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	uint64_t *spaceptr = arg;
	if (bp != NULL && !BP_IS_HOLE(bp)) {
		*spaceptr += BP_GET_UCSIZE(bp);
	}
	return (0);
}

/*
 * Given a destination snapshot and a TXG, calculate the approximate size of a
 * send stream sent from that TXG. from_txg may be zero, indicating that the
 * whole snapshot will be sent.
 */
int
dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
    uint64_t *sizep)
{
	dsl_pool_t *dp = ds->ds_dir->dd_pool;
	int err;
	uint64_t size = 0;

	ASSERT(dsl_pool_config_held(dp));

	/* tosnap must be a snapshot */
	if (!dsl_dataset_is_snapshot(ds))
		return (SET_ERROR(EINVAL));

	/* verify that from_txg is before the provided snapshot was taken */
	if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
		return (SET_ERROR(EXDEV));
	}

	/*
	 * traverse the blocks of the snapshot with birth times after
	 * from_txg, summing their uncompressed size
	 */
	err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
	    dmu_calculate_send_traversal, &size);
	if (err)
		return (err);

	err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
	return (err);
}

typedef struct dmu_recv_begin_arg {
	const char *drba_origin;
	dmu_recv_cookie_t *drba_cookie;
	cred_t *drba_cred;
	uint64_t drba_snapobj;
} dmu_recv_begin_arg_t;

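/*
 * Validate a receive into an existing filesystem: the temporary clone
 * name must be free, the target snapshot must not already exist, the
 * snapshot limit must not be exceeded, and for an incremental stream a
 * snapshot matching fromguid must exist in the target's history.
 */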
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
    uint64_t fromguid)
{
	uint64_t val;
	int error;
	dsl_pool_t *dp = ds->ds_dir->dd_pool;

	/* temporary clone name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
	    8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EBUSY : error);

	/* new snapshot name must not exist */
	error = zap_lookup(dp->dp_meta_objset,
	    dsl_dataset_phys(ds)->ds_snapnames_zapobj,
	    drba->drba_cookie->drc_tosnap, 8, 1, &val);
	if (error != ENOENT)
		return (error == 0 ? EEXIST : error);

	/*
	 * Check snapshot limit before receiving. We'll recheck again at the
	 * end, but might as well abort before receiving if we're already over
	 * the limit.
	 *
	 * Note that we do not check the file system limit with
	 * dsl_dir_fscount_check because the temporary %clones don't count
	 * against that limit.
	 */
	error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
	    NULL, drba->drba_cred);
	if (error != 0)
		return (error);

	if (fromguid != 0) {
		dsl_dataset_t *snap;
		uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;

		/* Find snapshot in this dir that matches fromguid. */
		while (obj != 0) {
			error = dsl_dataset_hold_obj(dp, obj, FTAG,
			    &snap);
			if (error != 0)
				return (SET_ERROR(ENODEV));
			if (snap->ds_dir != ds->ds_dir) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ENODEV));
			}
			if (dsl_dataset_phys(snap)->ds_guid == fromguid)
				break;
			obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
			dsl_dataset_rele(snap, FTAG);
		}
		if (obj == 0)
			return (SET_ERROR(ENODEV));

		if (drba->drba_cookie->drc_force) {
			drba->drba_snapobj = obj;
		} else {
			/*
			 * If we are not forcing, there must be no
			 * changes since fromsnap.
			 */
			if (dsl_dataset_modified_since_snap(ds, snap)) {
				dsl_dataset_rele(snap, FTAG);
				return (SET_ERROR(ETXTBSY));
			}
			drba->drba_snapobj = ds->ds_prev->ds_object;
		}

		dsl_dataset_rele(snap, FTAG);
	} else {
		/* if full, then must be forced */
		if (!drba->drba_cookie->drc_force)
			return (SET_ERROR(EEXIST));
		/* start from $ORIGIN@$ORIGIN, if supported */
		drba->drba_snapobj = dp->dp_origin_snap != NULL ?
		    dp->dp_origin_snap->ds_object : 0;
	}

	return (0);
}

static int
dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	uint64_t fromguid = drrb->drr_fromguid;
	int flags = drrb->drr_flags;
	int error;
	uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
	dsl_dataset_t *ds;
	const char *tofs = drba->drba_cookie->drc_tofs;

	/* already checked */
	ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);

	if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
	    DMU_COMPOUNDSTREAM ||
	    drrb->drr_type >= DMU_OST_NUMTYPES ||
	    ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
		return (SET_ERROR(EINVAL));

	/* Verify pool version supports SA if SA_SPILL feature set */
	if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
	    spa_version(dp->dp_spa) < SPA_VERSION_SA)
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate a WRITE_EMBEDDED
	 * record to a plain WRITE record, so the pool must have the
	 * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
	 * records.  Same with WRITE_EMBEDDED records that use LZ4 compression.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
		return (SET_ERROR(ENOTSUP));
	if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
		return (SET_ERROR(ENOTSUP));

	/*
	 * The receiving code doesn't know how to translate large blocks
	 * to smaller ones, so the pool must have the LARGE_BLOCKS
	 * feature enabled if the stream has LARGE_BLOCKS.
	 */
	if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
	    !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
		return (SET_ERROR(ENOTSUP));

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* target fs already exists; recv into temp clone */

		/* Can't recv a clone into an existing fs */
		if (flags & DRR_FLAG_CLONE) {
			dsl_dataset_rele(ds, FTAG);
			return (SET_ERROR(EINVAL));
		}

		error = recv_begin_check_existing_impl(drba, ds, fromguid);
		dsl_dataset_rele(ds, FTAG);
	} else if (error == ENOENT) {
		/* target fs does not exist; must be a full backup or clone */
		char buf[MAXNAMELEN];

		/*
		 * If it's a non-clone incremental, we are missing the
		 * target fs, so fail the recv.
		 */
		if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
		    drba->drba_origin))
			return (SET_ERROR(ENOENT));

		/* Open the parent of tofs */
		ASSERT3U(strlen(tofs), <, MAXNAMELEN);
		(void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
		error = dsl_dataset_hold(dp, buf, FTAG, &ds);
		if (error != 0)
			return (error);

		/*
		 * Check filesystem and snapshot limits before receiving. We'll
		 * recheck snapshot limits again at the end (we create the
		 * filesystems and increment those counts during begin_sync).
		 */
		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
		    ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
		if (error != 0) {
			dsl_dataset_rele(ds, FTAG);
			return (error);
		}

		if (drba->drba_origin != NULL) {
			dsl_dataset_t *origin;
			error = dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin);
			if (error != 0) {
				dsl_dataset_rele(ds, FTAG);
				return (error);
			}
			if (!origin->ds_is_snapshot) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(EINVAL));
			}
			if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
				dsl_dataset_rele(origin, FTAG);
				dsl_dataset_rele(ds, FTAG);
				return (SET_ERROR(ENODEV));
			}
			dsl_dataset_rele(origin, FTAG);
		}
		dsl_dataset_rele(ds, FTAG);
		error = 0;
	}
	return (error);
}

static void
dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
{
	dmu_recv_begin_arg_t *drba = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
	const char *tofs = drba->drba_cookie->drc_tofs;
	dsl_dataset_t *ds, *newds;
	uint64_t dsobj;
	int error;
	uint64_t crflags;

	crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
	    DS_FLAG_CI_DATASET : 0;

	error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
	if (error == 0) {
		/* create temporary clone */
		dsl_dataset_t *snap = NULL;
		if (drba->drba_snapobj != 0) {
			VERIFY0(dsl_dataset_hold_obj(dp,
			    drba->drba_snapobj, FTAG, &snap));
		}
		dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
		    snap, crflags, drba->drba_cred, tx);
		if (drba->drba_snapobj != 0)
			dsl_dataset_rele(snap, FTAG);
		dsl_dataset_rele(ds, FTAG);
	} else {
		dsl_dir_t *dd;
		const char *tail;
		dsl_dataset_t *origin = NULL;

		VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));

		if (drba->drba_origin != NULL) {
			VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
			    FTAG, &origin));
		}

		/* Create new dataset. */
		dsobj = dsl_dataset_create_sync(dd,
		    strrchr(tofs, '/') + 1,
		    origin, crflags, drba->drba_cred, tx);
		if (origin != NULL)
			dsl_dataset_rele(origin, FTAG);
		dsl_dir_rele(dd, FTAG);
		drba->drba_cookie->drc_newfs = B_TRUE;
	}
	VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));

	dmu_buf_will_dirty(newds->ds_dbuf, tx);
	dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;

	/*
	 * If we actually created a non-clone, we need to create the
	 * objset in our new dataset.
	 */
	if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
		(void) dmu_objset_create_impl(dp->dp_spa,
		    newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
	}

	drba->drba_cookie->drc_ds = newds;

	spa_history_log_internal_ds(newds, "receive", tx, "");
}

/*
 * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
 * succeeds; otherwise we will leak the holds on the datasets.
 */
int
dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
    boolean_t force, char *origin, dmu_recv_cookie_t *drc)
{
	dmu_recv_begin_arg_t drba = { 0 };
	dmu_replay_record_t *drr;

	bzero(drc, sizeof (dmu_recv_cookie_t));
	drc->drc_drrb = drrb;
	drc->drc_tosnap = tosnap;
	drc->drc_tofs = tofs;
	drc->drc_force = force;
	drc->drc_cred = CRED();

	if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
		drc->drc_byteswap = B_TRUE;
	else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
		return (SET_ERROR(EINVAL));

	drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
	drr->drr_type = DRR_BEGIN;
	drr->drr_u.drr_begin = *drc->drc_drrb;
	if (drc->drc_byteswap) {
		fletcher_4_incremental_byteswap(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	} else {
		fletcher_4_incremental_native(drr,
		    sizeof (dmu_replay_record_t), &drc->drc_cksum);
	}
	kmem_free(drr, sizeof (dmu_replay_record_t));

	if (drc->drc_byteswap) {
		drrb->drr_magic = BSWAP_64(drrb->drr_magic);
		drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
		drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
		drrb->drr_type = BSWAP_32(drrb->drr_type);
		drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
		drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
	}

	drba.drba_origin = origin;
	drba.drba_cookie = drc;
	drba.drba_cred = CRED();

	return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
	    &drba, 5, ZFS_SPACE_CHECK_NORMAL));
}

struct receive_record_arg {
	dmu_replay_record_t header;
	void *payload; /* Pointer to a buffer containing the payload */
	/*
	 * If the record is a write, pointer to the arc_buf_t containing the
	 * payload.
	 */
	arc_buf_t *write_buf;
	int payload_size;
	boolean_t eos_marker; /* Marks the end of the stream */
	bqueue_node_t node;
};

struct receive_writer_arg {
	objset_t *os;
	boolean_t byteswap;
	bqueue_t q;
	/*
	 * These three args are used to signal to the main thread that we're
	 * done.
	 */
	kmutex_t mutex;
	kcondvar_t cv;
	boolean_t done;
	int err;
	/* A map from guid to dataset to help handle dedup'd streams. */
	avl_tree_t *guid_to_ds_map;
};

struct receive_arg {
	objset_t *os;
	kthread_t *td;
	struct file *fp;
	uint64_t voff; /* The current offset in the stream */
	/*
	 * A record that has had its payload read in, but hasn't yet been handed
	 * off to the worker thread.
	 */
	struct receive_record_arg *rrd;
	/* A record that has had its header read in, but not its payload. */
	struct receive_record_arg *next_rrd;
	zio_cksum_t cksum;
	zio_cksum_t prev_cksum;
	int err;
	boolean_t byteswap;
	/* Sorted list of objects not to issue prefetches for. */
	list_t ignore_obj_list;
};

struct receive_ign_obj_node {
	list_node_t node;
	uint64_t object;
};

typedef struct guid_map_entry {
	uint64_t	guid;
	dsl_dataset_t	*gme_ds;
	avl_node_t	avlnode;
} guid_map_entry_t;

static int
guid_compare(const void *arg1, const void *arg2)
{
	const guid_map_entry_t *gmep1 = arg1;
	const guid_map_entry_t *gmep2 = arg2;

	if (gmep1->guid < gmep2->guid)
		return (-1);
	else if (gmep1->guid > gmep2->guid)
		return (1);
	return (0);
}

static void
free_guid_map_onexit(void *arg)
{
	avl_tree_t *ca = arg;
	void *cookie = NULL;
	guid_map_entry_t *gmep;

	while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
		dsl_dataset_long_rele(gmep->gme_ds, gmep);
		dsl_dataset_rele(gmep->gme_ds, gmep);
		kmem_free(gmep, sizeof (guid_map_entry_t));
	}
	avl_destroy(ca);
	kmem_free(ca, sizeof (avl_tree_t));
}
static int
restore_bytes(struct receive_arg *ra, void *buf, int len, off_t off,
    ssize_t *resid)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_base = buf;
	aiov.iov_len = len;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_resid = len;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = off;
	auio.uio_td = ra->td;
#ifdef _KERNEL
	error = fo_read(ra->fp, &auio, ra->td->td_ucred, FOF_OFFSET, ra->td);
#else
	fprintf(stderr, "%s: returning EOPNOTSUPP\n", __func__);
	error = EOPNOTSUPP;
#endif
	*resid = auio.uio_resid;
	return (error);
}

static int
receive_read(struct receive_arg *ra, int len, void *buf)
{
	int done = 0;

	/* some things will require 8-byte alignment, so everything must */
	ASSERT0(len % 8);

	while (done < len) {
		ssize_t resid;

		ra->err = restore_bytes(ra, buf + done,
		    len - done, ra->voff, &resid);

		if (resid == len - done)
			ra->err = SET_ERROR(EINVAL);
		ra->voff += len - done - resid;
		done = len - resid;
		if (ra->err != 0)
			return (ra->err);
	}

	ASSERT3U(done, ==, len);
	return (0);
}
static void
byteswap_record(dmu_replay_record_t *drr)
{
#define	DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define	DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
	drr->drr_type = BSWAP_32(drr->drr_type);
	drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);

	switch (drr->drr_type) {
	case DRR_BEGIN:
		DO64(drr_begin.drr_magic);
		DO64(drr_begin.drr_versioninfo);
		DO64(drr_begin.drr_creation_time);
		DO32(drr_begin.drr_type);
		DO32(drr_begin.drr_flags);
		DO64(drr_begin.drr_toguid);
		DO64(drr_begin.drr_fromguid);
		break;
	case DRR_OBJECT:
		DO64(drr_object.drr_object);
		DO32(drr_object.drr_type);
		DO32(drr_object.drr_bonustype);
		DO32(drr_object.drr_blksz);
		DO32(drr_object.drr_bonuslen);
		DO64(drr_object.drr_toguid);
		break;
	case DRR_FREEOBJECTS:
		DO64(drr_freeobjects.drr_firstobj);
		DO64(drr_freeobjects.drr_numobjs);
		DO64(drr_freeobjects.drr_toguid);
		break;
	case DRR_WRITE:
		DO64(drr_write.drr_object);
		DO32(drr_write.drr_type);
		DO64(drr_write.drr_offset);
		DO64(drr_write.drr_length);
		DO64(drr_write.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
		DO64(drr_write.drr_key.ddk_prop);
		break;
	case DRR_WRITE_BYREF:
		DO64(drr_write_byref.drr_object);
		DO64(drr_write_byref.drr_offset);
		DO64(drr_write_byref.drr_length);
		DO64(drr_write_byref.drr_toguid);
		DO64(drr_write_byref.drr_refguid);
		DO64(drr_write_byref.drr_refobject);
		DO64(drr_write_byref.drr_refoffset);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
		    drr_key.ddk_cksum);
		DO64(drr_write_byref.drr_key.ddk_prop);
		break;
	case DRR_WRITE_EMBEDDED:
		DO64(drr_write_embedded.drr_object);
		DO64(drr_write_embedded.drr_offset);
		DO64(drr_write_embedded.drr_length);
		DO64(drr_write_embedded.drr_toguid);
		DO32(drr_write_embedded.drr_lsize);
		DO32(drr_write_embedded.drr_psize);
		break;
	case DRR_FREE:
		DO64(drr_free.drr_object);
		DO64(drr_free.drr_offset);
		DO64(drr_free.drr_length);
		DO64(drr_free.drr_toguid);
		break;
	case DRR_SPILL:
		DO64(drr_spill.drr_object);
		DO64(drr_spill.drr_length);
		DO64(drr_spill.drr_toguid);
		break;
	case DRR_END:
		DO64(drr_end.drr_toguid);
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
		break;
	}

	if (drr->drr_type != DRR_BEGIN) {
		ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
	}

#undef DO64
#undef DO32
}

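/*
 * Compute how many block pointers a dnode with the given bonus type and
 * size will have.  The bonus buffer shares dnode space with block
 * pointers, so every sizeof (blkptr_t) (128) bytes of unused bonus space
 * yields an additional block pointer.  A DMU_OT_SA bonus buffer can
 * consume all of the bonus space (using a spill block for overflow), so
 * it leaves only one.
 */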
static inline uint8_t
deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
{
	if (bonus_type == DMU_OT_SA) {
		return (1);
	} else {
		return (1 +
		    ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
	}
}
1688static int
1689receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
1690    void *data)
1691{
1692	dmu_object_info_t doi;
1693	dmu_tx_t *tx;
1694	uint64_t object;
1695	int err;
1696
1697	if (drro->drr_type == DMU_OT_NONE ||
1698	    !DMU_OT_IS_VALID(drro->drr_type) ||
1699	    !DMU_OT_IS_VALID(drro->drr_bonustype) ||
1700	    drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
1701	    drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
1702	    P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
1703	    drro->drr_blksz < SPA_MINBLOCKSIZE ||
1704	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
1705	    drro->drr_bonuslen > DN_MAX_BONUSLEN) {
1706		return (SET_ERROR(EINVAL));
1707	}
1708
1709	err = dmu_object_info(rwa->os, drro->drr_object, &doi);
1710
1711	if (err != 0 && err != ENOENT)
1712		return (SET_ERROR(EINVAL));
1713	object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
1714
1715	/*
1716	 * If we are losing blkptrs or changing the block size this must
1717	 * be a new file instance.  We must clear out the previous file
1718	 * contents before we can change this type of metadata in the dnode.
1719	 */
1720	if (err == 0) {
1721		int nblkptr;
1722
1723		nblkptr = deduce_nblkptr(drro->drr_bonustype,
1724		    drro->drr_bonuslen);
1725
1726		if (drro->drr_blksz != doi.doi_data_block_size ||
1727		    nblkptr < doi.doi_nblkptr) {
1728			err = dmu_free_long_range(rwa->os, drro->drr_object,
1729			    0, DMU_OBJECT_END);
1730			if (err != 0)
1731				return (SET_ERROR(EINVAL));
1732		}
1733	}
1734
1735	tx = dmu_tx_create(rwa->os);
1736	dmu_tx_hold_bonus(tx, object);
1737	err = dmu_tx_assign(tx, TXG_WAIT);
1738	if (err != 0) {
1739		dmu_tx_abort(tx);
1740		return (err);
1741	}
1742
1743	if (object == DMU_NEW_OBJECT) {
1744		/* currently free, want to be allocated */
1745		err = dmu_object_claim(rwa->os, drro->drr_object,
1746		    drro->drr_type, drro->drr_blksz,
1747		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1748	} else if (drro->drr_type != doi.doi_type ||
1749	    drro->drr_blksz != doi.doi_data_block_size ||
1750	    drro->drr_bonustype != doi.doi_bonus_type ||
1751	    drro->drr_bonuslen != doi.doi_bonus_size) {
1752		/* currently allocated, but with different properties */
1753		err = dmu_object_reclaim(rwa->os, drro->drr_object,
1754		    drro->drr_type, drro->drr_blksz,
1755		    drro->drr_bonustype, drro->drr_bonuslen, tx);
1756	}
1757	if (err != 0) {
1758		dmu_tx_commit(tx);
1759		return (SET_ERROR(EINVAL));
1760	}
1761
1762	dmu_object_set_checksum(rwa->os, drro->drr_object,
1763	    drro->drr_checksumtype, tx);
1764	dmu_object_set_compress(rwa->os, drro->drr_object,
1765	    drro->drr_compress, tx);
1766
1767	if (data != NULL) {
1768		dmu_buf_t *db;
1769
1770		VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
1771		dmu_buf_will_dirty(db, tx);
1772
1773		ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
1774		bcopy(data, db->db_data, drro->drr_bonuslen);
1775		if (rwa->byteswap) {
1776			dmu_object_byteswap_t byteswap =
1777			    DMU_OT_BYTESWAP(drro->drr_bonustype);
1778			dmu_ot_byteswap[byteswap].ob_func(db->db_data,
1779			    drro->drr_bonuslen);
1780		}
1781		dmu_buf_rele(db, FTAG);
1782	}
1783	dmu_tx_commit(tx);
1784	return (0);
1785}
1786
1787/* ARGSUSED */
1788static int
1789receive_freeobjects(struct receive_writer_arg *rwa,
1790    struct drr_freeobjects *drrfo)
1791{
1792	uint64_t obj;
1793
1794	if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
1795		return (SET_ERROR(EINVAL));
1796
1797	for (obj = drrfo->drr_firstobj;
1798	    obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
1799	    (void) dmu_object_next(rwa->os, &obj, FALSE, 0)) {
1800		int err;
1801
1802		if (dmu_object_info(rwa->os, obj, NULL) != 0)
1803			continue;
1804
1805		err = dmu_free_long_object(rwa->os, obj);
1806		if (err != 0)
1807			return (err);
1808	}
1809	return (0);
1810}
1811
1812static int
1813receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
1814    arc_buf_t *abuf)
1815{
1816	dmu_tx_t *tx;
1817	int err;
1818
1819	if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
1820	    !DMU_OT_IS_VALID(drrw->drr_type))
1821		return (SET_ERROR(EINVAL));
1822
1823	if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
1824		return (SET_ERROR(EINVAL));
1825
1826	tx = dmu_tx_create(rwa->os);
1827
1828	dmu_tx_hold_write(tx, drrw->drr_object,
1829	    drrw->drr_offset, drrw->drr_length);
1830	err = dmu_tx_assign(tx, TXG_WAIT);
1831	if (err != 0) {
1832		dmu_tx_abort(tx);
1833		return (err);
1834	}
1835	if (rwa->byteswap) {
1836		dmu_object_byteswap_t byteswap =
1837		    DMU_OT_BYTESWAP(drrw->drr_type);
1838		dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
1839		    drrw->drr_length);
1840	}
1841
1842	dmu_buf_t *bonus;
1843	if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0) {
		/* The tx is already assigned, so commit it; don't abort. */
		dmu_tx_commit(tx);
1844		return (SET_ERROR(EINVAL));
	}
1845	dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
1846	dmu_tx_commit(tx);
1847	dmu_buf_rele(bonus, FTAG);
1848	return (0);
1849}
1850
1851/*
1852 * Handle a DRR_WRITE_BYREF record.  This record is used in dedup'ed
1853 * streams to refer to a copy of the data that is already on the
1854 * system because it came in earlier in the stream.  This function
1855 * finds the earlier copy of the data, and uses that copy instead of
1856 * data from the stream to fulfill this write.
1857 */
1858static int
1859receive_write_byref(struct receive_writer_arg *rwa,
1860    struct drr_write_byref *drrwbr)
1861{
1862	dmu_tx_t *tx;
1863	int err;
1864	guid_map_entry_t gmesrch;
1865	guid_map_entry_t *gmep;
1866	avl_index_t where;
1867	objset_t *ref_os = NULL;
1868	dmu_buf_t *dbp;
1869
1870	if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
1871		return (SET_ERROR(EINVAL));
1872
1873	/*
1874	 * If the GUID of the referenced dataset is different from the
1875	 * GUID of the target dataset, find the referenced dataset.
1876	 */
1877	if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
1878		gmesrch.guid = drrwbr->drr_refguid;
1879		if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
1880		    &where)) == NULL) {
1881			return (SET_ERROR(EINVAL));
1882		}
1883		if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
1884			return (SET_ERROR(EINVAL));
1885	} else {
1886		ref_os = rwa->os;
1887	}
1888
1889	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
1890	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
1891	if (err != 0)
1892		return (err);
1893
1894	tx = dmu_tx_create(rwa->os);
1895
1896	dmu_tx_hold_write(tx, drrwbr->drr_object,
1897	    drrwbr->drr_offset, drrwbr->drr_length);
1898	err = dmu_tx_assign(tx, TXG_WAIT);
1899	if (err != 0) {
1900		dmu_tx_abort(tx);
1901		return (err);
1902	}
1903	dmu_write(rwa->os, drrwbr->drr_object,
1904	    drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
1905	dmu_buf_rele(dbp, FTAG);
1906	dmu_tx_commit(tx);
1907	return (0);
1908}
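
/*
 * Editor's sketch (illustrative only): how a dedup'ed stream pairs a
 * DRR_WRITE with a later DRR_WRITE_BYREF.  All field values below are
 * hypothetical.
 */
#if 0
	/* First occurrence of a block: sent in full as a DRR_WRITE. */
	struct drr_write w = { 0 };
	w.drr_object = 5;
	w.drr_offset = 0;
	w.drr_length = 131072;

	/*
	 * A later, identical block: a DRR_WRITE_BYREF carries no payload,
	 * only the coordinates of the copy that was already received.
	 */
	struct drr_write_byref wbr = { 0 };
	wbr.drr_object = 7;		/* where this write lands */
	wbr.drr_offset = 262144;
	wbr.drr_length = 131072;
	wbr.drr_refguid = 0x1234;	/* dataset holding the earlier copy */
	wbr.drr_refobject = 5;		/* where the earlier copy lives */
	wbr.drr_refoffset = 0;
#endif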
1909
1910static int
1911receive_write_embedded(struct receive_writer_arg *rwa,
1912    struct drr_write_embedded *drrwnp, void *data)
1913{
1914	dmu_tx_t *tx;
1915	int err;
1916
1917	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
1918		return (SET_ERROR(EINVAL));
1919
1920	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
1921		return (SET_ERROR(EINVAL));
1922
1923	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
1924		return (SET_ERROR(EINVAL));
1925	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
1926		return (SET_ERROR(EINVAL));
1927
1928	tx = dmu_tx_create(rwa->os);
1929
1930	dmu_tx_hold_write(tx, drrwnp->drr_object,
1931	    drrwnp->drr_offset, drrwnp->drr_length);
1932	err = dmu_tx_assign(tx, TXG_WAIT);
1933	if (err != 0) {
1934		dmu_tx_abort(tx);
1935		return (err);
1936	}
1937
1938	dmu_write_embedded(rwa->os, drrwnp->drr_object,
1939	    drrwnp->drr_offset, data, drrwnp->drr_etype,
1940	    drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
1941	    rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
1942
1943	dmu_tx_commit(tx);
1944	return (0);
1945}
1946
1947static int
1948receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
1949    void *data)
1950{
1951	dmu_tx_t *tx;
1952	dmu_buf_t *db, *db_spill;
1953	int err;
1954
1955	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
1956	    drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
1957		return (SET_ERROR(EINVAL));
1958
1959	if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
1960		return (SET_ERROR(EINVAL));
1961
1962	VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
1963	if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
1964		dmu_buf_rele(db, FTAG);
1965		return (err);
1966	}
1967
1968	tx = dmu_tx_create(rwa->os);
1969
1970	dmu_tx_hold_spill(tx, db->db_object);
1971
1972	err = dmu_tx_assign(tx, TXG_WAIT);
1973	if (err != 0) {
1974		dmu_buf_rele(db, FTAG);
1975		dmu_buf_rele(db_spill, FTAG);
1976		dmu_tx_abort(tx);
1977		return (err);
1978	}
1979	dmu_buf_will_dirty(db_spill, tx);
1980
1981	if (db_spill->db_size < drrs->drr_length)
1982		VERIFY0(dbuf_spill_set_blksz(db_spill,
1983		    drrs->drr_length, tx));
1984	bcopy(data, db_spill->db_data, drrs->drr_length);
1985
1986	dmu_buf_rele(db, FTAG);
1987	dmu_buf_rele(db_spill, FTAG);
1988
1989	dmu_tx_commit(tx);
1990	return (0);
1991}
1992
1993/* ARGSUSED */
1994static int
1995receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
1996{
1997	int err;
1998
1999	if (drrf->drr_length != -1ULL &&
2000	    drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
2001		return (SET_ERROR(EINVAL));
2002
2003	if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
2004		return (SET_ERROR(EINVAL));
2005
2006	err = dmu_free_long_range(rwa->os, drrf->drr_object,
2007	    drrf->drr_offset, drrf->drr_length);
2008
2009	return (err);
2010}
2011
2012/* used to destroy the drc_ds on error */
2013static void
2014dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
2015{
2016	char name[MAXNAMELEN];
2017	dsl_dataset_name(drc->drc_ds, name);
2018	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2019	(void) dsl_destroy_head(name);
2020}
2021
2022static void
2023receive_cksum(struct receive_arg *ra, int len, void *buf)
2024{
2025	if (ra->byteswap) {
2026		fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
2027	} else {
2028		fletcher_4_incremental_native(buf, len, &ra->cksum);
2029	}
2030}
2031
2032/*
2033 * Read the payload into a buffer of size len, and update the current record's
2034 * payload field.
2035 * Allocate ra->next_rrd and read the next record's header into
2036 * ra->next_rrd->header.
2037	 * Verify the next header's checksum against the running stream checksum.
2038 */
2039static int
2040receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
2041{
2042	int err;
2043
2044	if (len != 0) {
2045		ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
2046		ra->rrd->payload = buf;
2047		ra->rrd->payload_size = len;
2048		err = receive_read(ra, len, ra->rrd->payload);
2049		if (err != 0)
2050			return (err);
2051		receive_cksum(ra, len, ra->rrd->payload);
2052	}
2053
2054	ra->prev_cksum = ra->cksum;
2055
2056	ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
2057	err = receive_read(ra, sizeof (ra->next_rrd->header),
2058	    &ra->next_rrd->header);
2059	if (err != 0) {
2060		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2061		ra->next_rrd = NULL;
2062		return (err);
2063	}
2064	if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
2065		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2066		ra->next_rrd = NULL;
2067		return (SET_ERROR(EINVAL));
2068	}
2069
2070	/*
2071	 * Note: checksum is of everything up to but not including the
2072	 * checksum itself.
2073	 */
2074	ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2075	    ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
2076	receive_cksum(ra,
2077	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
2078	    &ra->next_rrd->header);
2079
2080	zio_cksum_t cksum_orig =
2081	    ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2082	zio_cksum_t *cksump =
2083	    &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
2084
2085	if (ra->byteswap)
2086		byteswap_record(&ra->next_rrd->header);
2087
2088	if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
2089	    !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
2090		kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
2091		ra->next_rrd = NULL;
2092		return (SET_ERROR(ECKSUM));
2093	}
2094
2095	receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
2096
2097	return (0);
2098}
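
/*
 * Editor's sketch of the framing invariant verified above: a header's
 * embedded checksum covers every stream byte before it, up to but not
 * including the checksum field itself.  The helper below is hypothetical
 * and assumes `running` already covers all bytes preceding this header.
 */
#if 0
static boolean_t
record_cksum_matches(const dmu_replay_record_t *drr, zio_cksum_t *running)
{
	/* Fold in the header bytes that precede the embedded checksum. */
	fletcher_4_incremental_native(drr,
	    offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
	    running);
	return (ZIO_CHECKSUM_EQUAL(*running,
	    drr->drr_u.drr_checksum.drr_checksum));
}
#endif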
2099
2100/*
2101 * Issue the prefetch reads for any necessary indirect blocks.
2102 *
2103 * We use the object ignore list to tell us whether or not to issue prefetches
2104 * for a given object.  We do this for both correctness (in case the blocksize
2105 * of an object has changed) and performance (if the object doesn't exist, don't
2106 * needlessly try to issue prefetches).  We also trim the list as we go through
2107 * the stream to prevent it from growing to an unbounded size.
2108 *
2109 * The object numbers within will always be in sorted order, and any write
2110 * records we see will also be in sorted order, but they're not sorted with
2111 * respect to each other (i.e. we can get several object records before
2112 * receiving each object's write records).  As a result, once we've reached a
2113 * given object number, we can safely remove any reference to lower object
2114 * numbers in the ignore list. In practice, we receive up to 32 object records
2115 * before receiving write records, so the list can have up to 32 nodes in it.
2116 */
2117/* ARGSUSED */
2118static void
2119receive_read_prefetch(struct receive_arg *ra,
2120    uint64_t object, uint64_t offset, uint64_t length)
2121{
2122	struct receive_ign_obj_node *node = list_head(&ra->ignore_obj_list);
2123	while (node != NULL && node->object < object) {
2124		VERIFY3P(node, ==, list_remove_head(&ra->ignore_obj_list));
2125		kmem_free(node, sizeof (*node));
2126		node = list_head(&ra->ignore_obj_list);
2127	}
2128	if (node == NULL || node->object > object) {
2129		dmu_prefetch(ra->os, object, 1, offset, length,
2130		    ZIO_PRIORITY_SYNC_READ);
2131	}
2132}
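
/*
 * Editor's worked example of the trimming above, with hypothetical
 * object numbers.  Suppose object records for objects 3 and 9 changed
 * their block size, so ignore_obj_list holds (3, 9), and a write for
 * object 5 then arrives:
 *
 *	receive_read_prefetch(ra, 5, offset, length);
 *
 * Node 3 is below 5, so it is removed and freed (no later record can
 * refer to it); node 9 stops the scan.  Object 5 is not on the list, so
 * the dmu_prefetch() is issued.  A later write for object 9 would find
 * node 9 at the head and skip the prefetch.
 */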
2133
2134/*
2135 * Read records off the stream, issuing any necessary prefetches.
2136 */
2137static int
2138receive_read_record(struct receive_arg *ra)
2139{
2140	int err;
2141
2142	switch (ra->rrd->header.drr_type) {
2143	case DRR_OBJECT:
2144	{
2145		struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
2146		uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
2147		void *buf = kmem_zalloc(size, KM_SLEEP);
2148		dmu_object_info_t doi;
2149		err = receive_read_payload_and_next_header(ra, size, buf);
2150		if (err != 0) {
2151			kmem_free(buf, size);
2152			return (err);
2153		}
2154		err = dmu_object_info(ra->os, drro->drr_object, &doi);
2155		/*
2156		 * See receive_read_prefetch for an explanation why we're
2157		 * storing this object in the ignore_obj_list.
2158		 */
2159		if (err == ENOENT ||
2160		    (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
2161			struct receive_ign_obj_node *node =
2162			    kmem_zalloc(sizeof (*node),
2163			    KM_SLEEP);
2164			node->object = drro->drr_object;
2165#ifdef ZFS_DEBUG
2166			struct receive_ign_obj_node *last_object =
2167			    list_tail(&ra->ignore_obj_list);
2168			uint64_t last_objnum = (last_object != NULL ?
2169			    last_object->object : 0);
2170			ASSERT3U(node->object, >, last_objnum);
2171#endif
2172			list_insert_tail(&ra->ignore_obj_list, node);
2173			err = 0;
2174		}
2175		return (err);
2176	}
2177	case DRR_FREEOBJECTS:
2178	{
2179		err = receive_read_payload_and_next_header(ra, 0, NULL);
2180		return (err);
2181	}
2182	case DRR_WRITE:
2183	{
2184		struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
2185		arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
2186		    drrw->drr_length);
2187
2188		err = receive_read_payload_and_next_header(ra,
2189		    drrw->drr_length, abuf->b_data);
2190		if (err != 0) {
2191			dmu_return_arcbuf(abuf);
2192			return (err);
2193		}
2194		ra->rrd->write_buf = abuf;
2195		receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
2196		    drrw->drr_length);
2197		return (err);
2198	}
2199	case DRR_WRITE_BYREF:
2200	{
2201		struct drr_write_byref *drrwb =
2202		    &ra->rrd->header.drr_u.drr_write_byref;
2203		err = receive_read_payload_and_next_header(ra, 0, NULL);
2204		receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
2205		    drrwb->drr_length);
2206		return (err);
2207	}
2208	case DRR_WRITE_EMBEDDED:
2209	{
2210		struct drr_write_embedded *drrwe =
2211		    &ra->rrd->header.drr_u.drr_write_embedded;
2212		uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
2213		void *buf = kmem_zalloc(size, KM_SLEEP);
2214
2215		err = receive_read_payload_and_next_header(ra, size, buf);
2216		if (err != 0) {
2217			kmem_free(buf, size);
2218			return (err);
2219		}
2220
2221		receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
2222		    drrwe->drr_length);
2223		return (err);
2224	}
2225	case DRR_FREE:
2226	{
2227		/*
2228		 * It might be beneficial to prefetch indirect blocks here, but
2229		 * we don't really have the data to decide for sure.
2230		 */
2231		err = receive_read_payload_and_next_header(ra, 0, NULL);
2232		return (err);
2233	}
2234	case DRR_END:
2235	{
2236		struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
2237		if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
2238			return (SET_ERROR(EINVAL));
2239		return (0);
2240	}
2241	case DRR_SPILL:
2242	{
2243		struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
2244		void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
2245		err = receive_read_payload_and_next_header(ra, drrs->drr_length,
2246		    buf);
2247		if (err != 0)
2248			kmem_free(buf, drrs->drr_length);
2249		return (err);
2250	}
2251	default:
2252		return (SET_ERROR(EINVAL));
2253	}
2254}
2255
2256/*
2257	 * Commit a record from the stream to the pool.
2258 */
2259static int
2260receive_process_record(struct receive_writer_arg *rwa,
2261    struct receive_record_arg *rrd)
2262{
2263	int err;
2264
2265	switch (rrd->header.drr_type) {
2266	case DRR_OBJECT:
2267	{
2268		struct drr_object *drro = &rrd->header.drr_u.drr_object;
2269		err = receive_object(rwa, drro, rrd->payload);
2270		kmem_free(rrd->payload, rrd->payload_size);
2271		rrd->payload = NULL;
2272		return (err);
2273	}
2274	case DRR_FREEOBJECTS:
2275	{
2276		struct drr_freeobjects *drrfo =
2277		    &rrd->header.drr_u.drr_freeobjects;
2278		return (receive_freeobjects(rwa, drrfo));
2279	}
2280	case DRR_WRITE:
2281	{
2282		struct drr_write *drrw = &rrd->header.drr_u.drr_write;
2283		err = receive_write(rwa, drrw, rrd->write_buf);
2284		/* if receive_write() is successful, it consumes the arc_buf */
2285		if (err != 0)
2286			dmu_return_arcbuf(rrd->write_buf);
2287		rrd->write_buf = NULL;
2288		rrd->payload = NULL;
2289		return (err);
2290	}
2291	case DRR_WRITE_BYREF:
2292	{
2293		struct drr_write_byref *drrwbr =
2294		    &rrd->header.drr_u.drr_write_byref;
2295		return (receive_write_byref(rwa, drrwbr));
2296	}
2297	case DRR_WRITE_EMBEDDED:
2298	{
2299		struct drr_write_embedded *drrwe =
2300		    &rrd->header.drr_u.drr_write_embedded;
2301		err = receive_write_embedded(rwa, drrwe, rrd->payload);
2302		kmem_free(rrd->payload, rrd->payload_size);
2303		rrd->payload = NULL;
2304		return (err);
2305	}
2306	case DRR_FREE:
2307	{
2308		struct drr_free *drrf = &rrd->header.drr_u.drr_free;
2309		return (receive_free(rwa, drrf));
2310	}
2311	case DRR_SPILL:
2312	{
2313		struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
2314		err = receive_spill(rwa, drrs, rrd->payload);
2315		kmem_free(rrd->payload, rrd->payload_size);
2316		rrd->payload = NULL;
2317		return (err);
2318	}
2319	default:
2320		return (SET_ERROR(EINVAL));
2321	}
2322}
2323
2324/*
2325 * dmu_recv_stream's worker thread; pull records off the queue, and then call
2326	 * receive_process_record().  When we're done, signal the main thread and exit.
2327 */
2328static void
2329receive_writer_thread(void *arg)
2330{
2331	struct receive_writer_arg *rwa = arg;
2332	struct receive_record_arg *rrd;
2333	for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
2334	    rrd = bqueue_dequeue(&rwa->q)) {
2335		/*
2336		 * If there's an error, the main thread will stop putting things
2337		 * on the queue, but we need to clear everything in it before we
2338		 * can exit.
2339		 */
2340		if (rwa->err == 0) {
2341			rwa->err = receive_process_record(rwa, rrd);
2342		} else if (rrd->write_buf != NULL) {
2343			dmu_return_arcbuf(rrd->write_buf);
2344			rrd->write_buf = NULL;
2345			rrd->payload = NULL;
2346		} else if (rrd->payload != NULL) {
2347			kmem_free(rrd->payload, rrd->payload_size);
2348			rrd->payload = NULL;
2349		}
2350		kmem_free(rrd, sizeof (*rrd));
2351	}
2352	kmem_free(rrd, sizeof (*rrd));
2353	mutex_enter(&rwa->mutex);
2354	rwa->done = B_TRUE;
2355	cv_signal(&rwa->cv);
2356	mutex_exit(&rwa->mutex);
2357	thread_exit();
2358}
2359
2360/*
2361 * Read in the stream's records, one by one, and apply them to the pool.  There
2362 * are two threads involved; the thread that calls this function will spin up a
2363 * worker thread, read the records off the stream one by one, and issue
2364 * prefetches for any necessary indirect blocks.  It will then push the records
2365 * onto an internal blocking queue.  The worker thread will pull the records off
2366 * the queue, and actually write the data into the DMU.  This way, the worker
2367 * thread doesn't have to wait for reads to complete, since everything it needs
2368 * (the indirect blocks) will be prefetched.
2369 *
2370 * NB: callers *must* call dmu_recv_end() if this succeeds.
2371 */
2372int
2373dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
2374    int cleanup_fd, uint64_t *action_handlep)
2375{
2376	int err = 0;
2377	struct receive_arg ra = { 0 };
2378	struct receive_writer_arg rwa = { 0 };
2379	int featureflags;
2380
2381	ra.byteswap = drc->drc_byteswap;
2382	ra.cksum = drc->drc_cksum;
2383	ra.td = curthread;
2384	ra.fp = fp;
2385	ra.voff = *voffp;
2386	list_create(&ra.ignore_obj_list, sizeof (struct receive_ign_obj_node),
2387	    offsetof(struct receive_ign_obj_node, node));
2388
2389	/* these were verified in dmu_recv_begin */
2390	ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
2391	    DMU_SUBSTREAM);
2392	ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
2393
2394	/*
2395	 * Open the objset we are modifying.
2396	 */
2397	VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra.os));
2398
2399	ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
2400
2401	featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
2402
2403	/* if this stream is dedup'ed, set up the avl tree for guid mapping */
2404	if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
2405		minor_t minor;
2406
2407		if (cleanup_fd == -1) {
2408			err = SET_ERROR(EBADF);
2409			goto out;
2410		}
2411		err = zfs_onexit_fd_hold(cleanup_fd, &minor);
2412		if (err != 0) {
2413			cleanup_fd = -1;
2414			goto out;
2415		}
2416
2417		if (*action_handlep == 0) {
2418			rwa.guid_to_ds_map =
2419			    kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
2420			avl_create(rwa.guid_to_ds_map, guid_compare,
2421			    sizeof (guid_map_entry_t),
2422			    offsetof(guid_map_entry_t, avlnode));
2423			err = zfs_onexit_add_cb(minor,
2424			    free_guid_map_onexit, rwa.guid_to_ds_map,
2425			    action_handlep);
2426			if (err != 0)
2427				goto out;
2428		} else {
2429			err = zfs_onexit_cb_data(minor, *action_handlep,
2430			    (void **)&rwa.guid_to_ds_map);
2431			if (err != 0)
2432				goto out;
2433		}
2434
2435		drc->drc_guid_to_ds_map = rwa.guid_to_ds_map;
2436	}
2437
2438	err = receive_read_payload_and_next_header(&ra, 0, NULL);
2439	if (err != 0)
2440		goto out;
2441
2442	(void) bqueue_init(&rwa.q, zfs_recv_queue_length,
2443	    offsetof(struct receive_record_arg, node));
2444	cv_init(&rwa.cv, NULL, CV_DEFAULT, NULL);
2445	mutex_init(&rwa.mutex, NULL, MUTEX_DEFAULT, NULL);
2446	rwa.os = ra.os;
2447	rwa.byteswap = drc->drc_byteswap;
2448
2449	(void) thread_create(NULL, 0, receive_writer_thread, &rwa, 0, curproc,
2450	    TS_RUN, minclsyspri);
2451	/*
2452	 * We're reading rwa.err without locks, which is safe since we are the
2453	 * only reader, and the worker thread is the only writer.  It's ok if we
2454	 * miss a write for an iteration or two of the loop, since the writer
2455	 * thread will keep freeing records we send it until we send it an eos
2456	 * marker.
2457	 *
2458	 * We can leave this loop in 3 ways:  First, if rwa.err is
2459	 * non-zero.  In that case, the writer thread will free the rrd we just
2460	 * pushed.  Second, if we're interrupted; in that case, either it's the
2461	 * first loop and ra.rrd was never allocated, or it's later, and ra.rrd
2462	 * has been handed off to the writer thread, which will free it.  Finally,
2463	 * if receive_read_record fails or we're at the end of the stream, then
2464	 * we free ra.rrd and exit.
2465	 */
2466	while (rwa.err == 0) {
2467		if (issig(JUSTLOOKING) && issig(FORREAL)) {
2468			err = SET_ERROR(EINTR);
2469			break;
2470		}
2471
2472		ASSERT3P(ra.rrd, ==, NULL);
2473		ra.rrd = ra.next_rrd;
2474		ra.next_rrd = NULL;
2475		/* Allocates and loads header into ra.next_rrd */
2476		err = receive_read_record(&ra);
2477
2478		if (ra.rrd->header.drr_type == DRR_END || err != 0) {
2479			kmem_free(ra.rrd, sizeof (*ra.rrd));
2480			ra.rrd = NULL;
2481			break;
2482		}
2483
2484		bqueue_enqueue(&rwa.q, ra.rrd,
2485		    sizeof (struct receive_record_arg) + ra.rrd->payload_size);
2486		ra.rrd = NULL;
2487	}
2488	if (ra.next_rrd == NULL)
2489		ra.next_rrd = kmem_zalloc(sizeof (*ra.next_rrd), KM_SLEEP);
2490	ra.next_rrd->eos_marker = B_TRUE;
2491	bqueue_enqueue(&rwa.q, ra.next_rrd, 1);
2492
2493	mutex_enter(&rwa.mutex);
2494	while (!rwa.done) {
2495		cv_wait(&rwa.cv, &rwa.mutex);
2496	}
2497	mutex_exit(&rwa.mutex);
2498
2499	cv_destroy(&rwa.cv);
2500	mutex_destroy(&rwa.mutex);
2501	bqueue_destroy(&rwa.q);
2502	if (err == 0)
2503		err = rwa.err;
2504
2505out:
2506	if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
2507		zfs_onexit_fd_rele(cleanup_fd);
2508
2509	if (err != 0) {
2510		/*
2511		 * destroy what we created, so we don't leave it in the
2512		 * inconsistent restoring state.
2513		 */
2514		dmu_recv_cleanup_ds(drc);
2515	}
2516
2517	*voffp = ra.voff;
2518	for (struct receive_ign_obj_node *n =
2519	    list_remove_head(&ra.ignore_obj_list); n != NULL;
2520	    n = list_remove_head(&ra.ignore_obj_list)) {
2521		kmem_free(n, sizeof (*n));
2522	}
2523	list_destroy(&ra.ignore_obj_list);
2524	return (err);
2525}
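
/*
 * Editor's sketch of the bqueue handoff that dmu_recv_stream() and
 * receive_writer_thread() implement, reduced to a skeleton.  The names
 * below are illustrative; only the bqueue_*() and kmem_*() calls are
 * real interfaces.
 */
#if 0
	/* Producer: weight each entry by its memory footprint, then push
	 * a zero-filled end-of-stream marker with a token weight of 1. */
	bqueue_enqueue(&q, rec, sizeof (*rec) + rec->payload_size);
	/* ...more records... */
	eos = kmem_zalloc(sizeof (*eos), KM_SLEEP);
	eos->eos_marker = B_TRUE;
	bqueue_enqueue(&q, eos, 1);

	/* Consumer: drain until the marker, freeing every entry (the
	 * marker included), then signal the producer. */
	for (rec = bqueue_dequeue(&q); !rec->eos_marker;
	    rec = bqueue_dequeue(&q)) {
		/* process rec, then free it */
		kmem_free(rec, sizeof (*rec));
	}
	kmem_free(rec, sizeof (*rec));
#endif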
2526
2527static int
2528dmu_recv_end_check(void *arg, dmu_tx_t *tx)
2529{
2530	dmu_recv_cookie_t *drc = arg;
2531	dsl_pool_t *dp = dmu_tx_pool(tx);
2532	int error;
2533
2534	ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
2535
2536	if (!drc->drc_newfs) {
2537		dsl_dataset_t *origin_head;
2538
2539		error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
2540		if (error != 0)
2541			return (error);
2542		if (drc->drc_force) {
2543			/*
2544			 * We will destroy any snapshots in tofs (i.e. before
2545			 * origin_head) that are after the origin (which is
2546			 * the snap before drc_ds, because drc_ds cannot
2547			 * have any snaps of its own).
2548			 */
2549			uint64_t obj;
2550
2551			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2552			while (obj !=
2553			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2554				dsl_dataset_t *snap;
2555				error = dsl_dataset_hold_obj(dp, obj, FTAG,
2556				    &snap);
2557				if (error != 0)
2558					break;
2559				if (snap->ds_dir != origin_head->ds_dir)
2560					error = SET_ERROR(EINVAL);
2561				if (error == 0) {
2562					error = dsl_destroy_snapshot_check_impl(
2563					    snap, B_FALSE);
2564				}
2565				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2566				dsl_dataset_rele(snap, FTAG);
2567				if (error != 0)
2568					break;
2569			}
2570			if (error != 0) {
2571				dsl_dataset_rele(origin_head, FTAG);
2572				return (error);
2573			}
2574		}
2575		error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
2576		    origin_head, drc->drc_force, drc->drc_owner, tx);
2577		if (error != 0) {
2578			dsl_dataset_rele(origin_head, FTAG);
2579			return (error);
2580		}
2581		error = dsl_dataset_snapshot_check_impl(origin_head,
2582		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2583		dsl_dataset_rele(origin_head, FTAG);
2584		if (error != 0)
2585			return (error);
2586
2587		error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
2588	} else {
2589		error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
2590		    drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
2591	}
2592	return (error);
2593}
2594
2595static void
2596dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
2597{
2598	dmu_recv_cookie_t *drc = arg;
2599	dsl_pool_t *dp = dmu_tx_pool(tx);
2600
2601	spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
2602	    tx, "snap=%s", drc->drc_tosnap);
2603
2604	if (!drc->drc_newfs) {
2605		dsl_dataset_t *origin_head;
2606
2607		VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
2608		    &origin_head));
2609
2610		if (drc->drc_force) {
2611			/*
2612			 * Destroy any snapshots of drc_tofs (origin_head)
2613			 * after the origin (the snap before drc_ds).
2614			 */
2615			uint64_t obj;
2616
2617			obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
2618			while (obj !=
2619			    dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
2620				dsl_dataset_t *snap;
2621				VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
2622				    &snap));
2623				ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
2624				obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
2625				dsl_destroy_snapshot_sync_impl(snap,
2626				    B_FALSE, tx);
2627				dsl_dataset_rele(snap, FTAG);
2628			}
2629		}
2630		VERIFY3P(drc->drc_ds->ds_prev, ==,
2631		    origin_head->ds_prev);
2632
2633		dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
2634		    origin_head, tx);
2635		dsl_dataset_snapshot_sync_impl(origin_head,
2636		    drc->drc_tosnap, tx);
2637
2638		/* set snapshot's creation time and guid */
2639		dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
2640		dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
2641		    drc->drc_drrb->drr_creation_time;
2642		dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
2643		    drc->drc_drrb->drr_toguid;
2644		dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
2645		    ~DS_FLAG_INCONSISTENT;
2646
2647		dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
2648		dsl_dataset_phys(origin_head)->ds_flags &=
2649		    ~DS_FLAG_INCONSISTENT;
2650
2651		dsl_dataset_rele(origin_head, FTAG);
2652		dsl_destroy_head_sync_impl(drc->drc_ds, tx);
2653
2654		if (drc->drc_owner != NULL)
2655			VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
2656	} else {
2657		dsl_dataset_t *ds = drc->drc_ds;
2658
2659		dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
2660
2661		/* set snapshot's creation time and guid */
2662		dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
2663		dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
2664		    drc->drc_drrb->drr_creation_time;
2665		dsl_dataset_phys(ds->ds_prev)->ds_guid =
2666		    drc->drc_drrb->drr_toguid;
2667		dsl_dataset_phys(ds->ds_prev)->ds_flags &=
2668		    ~DS_FLAG_INCONSISTENT;
2669
2670		dmu_buf_will_dirty(ds->ds_dbuf, tx);
2671		dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
2672	}
2673	drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
2674	/*
2675	 * Release the hold from dmu_recv_begin.  This must be done before
2676	 * we return to open context, so that when we free the dataset's dnode,
2677	 * we can evict its bonus buffer.
2678	 */
2679	dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
2680	drc->drc_ds = NULL;
2681}
2682
2683static int
2684add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
2685{
2686	dsl_pool_t *dp;
2687	dsl_dataset_t *snapds;
2688	guid_map_entry_t *gmep;
2689	int err;
2690
2691	ASSERT(guid_map != NULL);
2692
2693	err = dsl_pool_hold(name, FTAG, &dp);
2694	if (err != 0)
2695		return (err);
2696	gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
2697	err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
2698	if (err == 0) {
2699		gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
2700		gmep->gme_ds = snapds;
2701		avl_add(guid_map, gmep);
2702		dsl_dataset_long_hold(snapds, gmep);
2703	} else
2704		kmem_free(gmep, sizeof (*gmep));
2705
2706	dsl_pool_rele(dp, FTAG);
2707	return (err);
2708}
2709
2710static int dmu_recv_end_modified_blocks = 3;
2711
2712static int
2713dmu_recv_existing_end(dmu_recv_cookie_t *drc)
2714{
2715	int error;
2716	char name[MAXNAMELEN];
2717
2718#ifdef _KERNEL
2719	/*
2720	 * We will be destroying the ds; make sure its origin is unmounted if
2721	 * necessary.
2722	 */
2723	dsl_dataset_name(drc->drc_ds, name);
2724	zfs_destroy_unmount_origin(name);
2725#endif
2726
2727	error = dsl_sync_task(drc->drc_tofs,
2728	    dmu_recv_end_check, dmu_recv_end_sync, drc,
2729	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2730
2731	if (error != 0)
2732		dmu_recv_cleanup_ds(drc);
2733	return (error);
2734}
2735
2736static int
2737dmu_recv_new_end(dmu_recv_cookie_t *drc)
2738{
2739	int error;
2740
2741	error = dsl_sync_task(drc->drc_tofs,
2742	    dmu_recv_end_check, dmu_recv_end_sync, drc,
2743	    dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
2744
2745	if (error != 0) {
2746		dmu_recv_cleanup_ds(drc);
2747	} else if (drc->drc_guid_to_ds_map != NULL) {
2748		(void) add_ds_to_guidmap(drc->drc_tofs,
2749		    drc->drc_guid_to_ds_map,
2750		    drc->drc_newsnapobj);
2751	}
2752	return (error);
2753}
2754
2755int
2756dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
2757{
2758	drc->drc_owner = owner;
2759
2760	if (drc->drc_newfs)
2761		return (dmu_recv_new_end(drc));
2762	else
2763		return (dmu_recv_existing_end(drc));
2764}
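
/*
 * Editor's sketch of the caller contract around these entry points
 * (argument lists abbreviated; see the NB above dmu_recv_stream()).
 */
#if 0
	dmu_recv_cookie_t drc;

	error = dmu_recv_begin(/* tofs, tosnap, begin record, ... */ &drc);
	if (error == 0) {
		error = dmu_recv_stream(&drc, fp, &off, cleanup_fd,
		    &action_handle);
		if (error == 0) {
			/* Mandatory on success: finalizes the snapshot. */
			error = dmu_recv_end(&drc, owner);
		}
		/* On failure, dmu_recv_stream() already destroyed drc_ds. */
	}
#endif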
2765
2766/*
2767 * Return TRUE if this objset is currently being received into.
2768 */
2769boolean_t
2770dmu_objset_is_receiving(objset_t *os)
2771{
2772	return (os->os_dsl_dataset != NULL &&
2773	    os->os_dsl_dataset->ds_owner == dmu_recv_tag);
2774}
2775