1/*
2 * CDDL HEADER START
3 *
4 * This file and its contents are supplied under the terms of the
5 * Common Development and Distribution License ("CDDL"), version 1.0.
6 * You may only use this file in accordance with the terms of version
7 * 1.0 of the CDDL.
8 *
9 * A full copy of the text of the CDDL should have accompanied this
10 * source.  A copy of the CDDL is also available via the Internet at
11 * http://www.illumos.org/license/CDDL.
12 *
13 * CDDL HEADER END
14 */
15
16/*
17 * Copyright (c) 2013, 2018 by Delphix. All rights reserved.
18 * Copyright 2017 Nexenta Systems, Inc.
19 * Copyright 2019, 2020 by Christian Schwarz. All rights reserved.
20 */
21
22#include <sys/zfs_context.h>
23#include <sys/dsl_dataset.h>
24#include <sys/dsl_dir.h>
25#include <sys/dsl_prop.h>
26#include <sys/dsl_synctask.h>
27#include <sys/dsl_destroy.h>
28#include <sys/dmu_impl.h>
29#include <sys/dmu_tx.h>
30#include <sys/arc.h>
31#include <sys/zap.h>
32#include <sys/zfeature.h>
33#include <sys/spa.h>
34#include <sys/dsl_bookmark.h>
35#include <zfs_namecheck.h>
36#include <sys/dmu_send.h>
37#include <sys/dbuf.h>
38
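/*
 * Given the full name of a bookmark ("pool/fs#bm"), hold the dataset that
 * contains it ("pool/fs") and set *shortnamep to point at the component
 * after the '#' ("bm").  On success the caller must eventually release the
 * dataset hold with dsl_dataset_rele().
 */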
39static int
40dsl_bookmark_hold_ds(dsl_pool_t *dp, const char *fullname,
41    dsl_dataset_t **dsp, const void *tag, char **shortnamep)
42{
43	char buf[ZFS_MAX_DATASET_NAME_LEN];
44	char *hashp;
45
46	if (strlen(fullname) >= ZFS_MAX_DATASET_NAME_LEN)
47		return (SET_ERROR(ENAMETOOLONG));
48	hashp = strchr(fullname, '#');
49	if (hashp == NULL)
50		return (SET_ERROR(EINVAL));
51
52	*shortnamep = hashp + 1;
53	if (zfs_component_namecheck(*shortnamep, NULL, NULL))
54		return (SET_ERROR(EINVAL));
55	(void) strlcpy(buf, fullname, hashp - fullname + 1);
56	return (dsl_dataset_hold(dp, buf, tag, dsp));
57}
58
/*
 * When reading BOOKMARK_V1 bookmarks, the BOOKMARK_V2 fields are guaranteed
 * to be zeroed.
 *
 * Returns ESRCH if the bookmark is not found.
 * Note that we need to use the ZAP rather than the AVL to look up bookmarks
 * by name, because only the ZAP honors the case-sensitivity setting.
 */
67int
68dsl_bookmark_lookup_impl(dsl_dataset_t *ds, const char *shortname,
69    zfs_bookmark_phys_t *bmark_phys)
70{
71	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
72	uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
73	matchtype_t mt = 0;
74	int err;
75
76	if (bmark_zapobj == 0)
77		return (SET_ERROR(ESRCH));
78
79	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
80		mt = MT_NORMALIZE;
81
82	/*
83	 * Zero out the bookmark in case the one stored on disk
84	 * is in an older, shorter format.
85	 */
86	memset(bmark_phys, 0, sizeof (*bmark_phys));
87
88	err = zap_lookup_norm(mos, bmark_zapobj, shortname, sizeof (uint64_t),
89	    sizeof (*bmark_phys) / sizeof (uint64_t), bmark_phys, mt, NULL, 0,
90	    NULL);
91
92	return (err == ENOENT ? SET_ERROR(ESRCH) : err);
93}
94
/*
 * If later_ds is non-NULL, this will return EXDEV if the specified bookmark
 * does not represent an earlier point in later_ds's timeline.  However,
 * bmp will still be filled in if we return EXDEV.
 *
 * Returns ENOENT if the dataset containing the bookmark does not exist.
 * Returns ESRCH if the dataset exists but the bookmark was not found in it.
 */
103int
104dsl_bookmark_lookup(dsl_pool_t *dp, const char *fullname,
105    dsl_dataset_t *later_ds, zfs_bookmark_phys_t *bmp)
106{
107	char *shortname;
108	dsl_dataset_t *ds;
109	int error;
110
111	error = dsl_bookmark_hold_ds(dp, fullname, &ds, FTAG, &shortname);
112	if (error != 0)
113		return (error);
114
115	error = dsl_bookmark_lookup_impl(ds, shortname, bmp);
116	if (error == 0 && later_ds != NULL) {
117		if (!dsl_dataset_is_before(later_ds, ds, bmp->zbm_creation_txg))
118			error = SET_ERROR(EXDEV);
119	}
120	dsl_dataset_rele(ds, FTAG);
121	return (error);
122}
123
124/*
125 * Validates that
126 * - bmark is a full dataset path of a bookmark (bookmark_namecheck)
127 * - source is a full path of a snapshot or bookmark
128 *   ({bookmark,snapshot}_namecheck)
129 *
130 * Returns 0 if valid, -1 otherwise.
131 */
132static int
133dsl_bookmark_create_nvl_validate_pair(const char *bmark, const char *source)
134{
135	if (bookmark_namecheck(bmark, NULL, NULL) != 0)
136		return (-1);
137
138	int is_bmark, is_snap;
139	is_bmark = bookmark_namecheck(source, NULL, NULL) == 0;
140	is_snap = snapshot_namecheck(source, NULL, NULL) == 0;
141	if (!is_bmark && !is_snap)
142		return (-1);
143
144	return (0);
145}
146
/*
 * Check that the given nvlist corresponds to the following schema:
 *  { newbookmark -> source, ... }
 * where
 * - each pair passes dsl_bookmark_create_nvl_validate_pair
 * - all newbookmarks are in the same pool
 * - all newbookmarks have unique names
 *
 * Note that this function only validates the above schema.  Callers must
 * ensure that the bookmarks can actually be created, e.g. that the sources
 * exist.
 *
 * Returns 0 if the nvlist adheres to the above schema.
 * Returns -1 if it doesn't.
 */
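/*
 * For example (illustrative names only), the following nvlist conforms to
 * the schema above:
 *   { "pool/fs#bm1" -> "pool/fs@snap1", "pool/fs#bm2" -> "pool/fs#bm1" }
 */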
161int
162dsl_bookmark_create_nvl_validate(nvlist_t *bmarks)
163{
164	const char *first = NULL;
165	size_t first_len = 0;
166
167	for (nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
168	    pair != NULL; pair = nvlist_next_nvpair(bmarks, pair)) {
169
170		const char *bmark = nvpair_name(pair);
171		const char *source;
172
173		/* list structure: values must be snapshots XOR bookmarks */
174		if (nvpair_value_string(pair, &source) != 0)
175			return (-1);
176		if (dsl_bookmark_create_nvl_validate_pair(bmark, source) != 0)
177			return (-1);
178
179		/* same pool check */
180		if (first == NULL) {
181			const char *cp = strpbrk(bmark, "/#");
182			if (cp == NULL)
183				return (-1);
184			first = bmark;
185			first_len = cp - bmark;
186		}
187		if (strncmp(first, bmark, first_len) != 0)
188			return (-1);
189		switch (*(bmark + first_len)) {
190			case '/': /* fallthrough */
191			case '#':
192				break;
193			default:
194				return (-1);
195		}
196
197		/* unique newbookmark names; todo: O(n^2) */
198		for (nvpair_t *pair2 = nvlist_next_nvpair(bmarks, pair);
199		    pair2 != NULL; pair2 = nvlist_next_nvpair(bmarks, pair2)) {
200			if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
201				return (-1);
202		}
203
204	}
205	return (0);
206}
207
/*
 * Expects that newbm and source have been validated using
 * dsl_bookmark_create_nvl_validate_pair().
 */
212static int
213dsl_bookmark_create_check_impl(dsl_pool_t *dp,
214    const char *newbm, const char *source)
215{
216	ASSERT0(dsl_bookmark_create_nvl_validate_pair(newbm, source));
217	/* defer source namecheck until we know it's a snapshot or bookmark */
218
219	int error;
220	dsl_dataset_t *newbm_ds;
221	char *newbm_short;
222	zfs_bookmark_phys_t bmark_phys;
223
224	error = dsl_bookmark_hold_ds(dp, newbm, &newbm_ds, FTAG, &newbm_short);
225	if (error != 0)
226		return (error);
227
228	/* Verify that the new bookmark does not already exist */
229	error = dsl_bookmark_lookup_impl(newbm_ds, newbm_short, &bmark_phys);
230	switch (error) {
231	case ESRCH:
232		/* happy path: new bmark doesn't exist, proceed after switch */
233		break;
234	case 0:
235		error = SET_ERROR(EEXIST);
236		goto eholdnewbmds;
237	default:
238		/* dsl_bookmark_lookup_impl already did SET_ERROR */
239		goto eholdnewbmds;
240	}
241
242	/* error is retval of the following if-cascade */
243	if (strchr(source, '@') != NULL) {
244		dsl_dataset_t *source_snap_ds;
245		ASSERT3S(snapshot_namecheck(source, NULL, NULL), ==, 0);
246		error = dsl_dataset_hold(dp, source, FTAG, &source_snap_ds);
247		if (error == 0) {
248			VERIFY(source_snap_ds->ds_is_snapshot);
249			/*
250			 * Verify that source snapshot is an earlier point in
251			 * newbm_ds's timeline (source may be newbm_ds's origin)
252			 */
253			if (!dsl_dataset_is_before(newbm_ds, source_snap_ds, 0))
254				error = SET_ERROR(
255				    ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
256			dsl_dataset_rele(source_snap_ds, FTAG);
257		}
258	} else if (strchr(source, '#') != NULL) {
259		zfs_bookmark_phys_t source_phys;
260		ASSERT3S(bookmark_namecheck(source, NULL, NULL), ==, 0);
		/*
		 * Source must exist and be an earlier point in newbm_ds's
		 * timeline (newbm_ds's origin may be a snap of source's ds)
		 */
265		error = dsl_bookmark_lookup(dp, source, newbm_ds, &source_phys);
266		switch (error) {
267		case 0:
268			break; /* happy path */
269		case EXDEV:
270			error = SET_ERROR(ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR);
271			break;
272		default:
273			/* dsl_bookmark_lookup already did SET_ERROR */
274			break;
275		}
276	} else {
277		/*
278		 * dsl_bookmark_create_nvl_validate validates that source is
279		 * either snapshot or bookmark
280		 */
281		panic("unreachable code: %s", source);
282	}
283
284eholdnewbmds:
285	dsl_dataset_rele(newbm_ds, FTAG);
286	return (error);
287}
288
289int
290dsl_bookmark_create_check(void *arg, dmu_tx_t *tx)
291{
292	dsl_bookmark_create_arg_t *dbca = arg;
293	int rv = 0;
294	int schema_err = 0;
295	ASSERT3P(dbca, !=, NULL);
296	ASSERT3P(dbca->dbca_bmarks, !=, NULL);
297	/* dbca->dbca_errors is allowed to be NULL */
298
299	dsl_pool_t *dp = dmu_tx_pool(tx);
300
301	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
302		return (SET_ERROR(ENOTSUP));
303
304	if (dsl_bookmark_create_nvl_validate(dbca->dbca_bmarks) != 0)
305		rv = schema_err = SET_ERROR(EINVAL);
306
307	for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
308	    pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
309		const char *new = nvpair_name(pair);
310
311		int error = schema_err;
312		if (error == 0) {
313			const char *source = fnvpair_value_string(pair);
314			error = dsl_bookmark_create_check_impl(dp, new, source);
315			if (error != 0)
316				error = SET_ERROR(error);
317		}
318
319		if (error != 0) {
320			rv = error;
321			if (dbca->dbca_errors != NULL)
322				fnvlist_add_int32(dbca->dbca_errors,
323				    new, error);
324		}
325	}
326
327	return (rv);
328}
329
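/*
 * Allocate an in-core bookmark node for the given short name.  The dbn_phys
 * portion is left uninitialized; callers are expected to fill it in, e.g.
 * via dsl_bookmark_set_phys() or dsl_bookmark_lookup_impl().
 */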
330static dsl_bookmark_node_t *
331dsl_bookmark_node_alloc(char *shortname)
332{
333	dsl_bookmark_node_t *dbn = kmem_alloc(sizeof (*dbn), KM_SLEEP);
334	dbn->dbn_name = spa_strdup(shortname);
335	dbn->dbn_dirty = B_FALSE;
336	mutex_init(&dbn->dbn_lock, NULL, MUTEX_DEFAULT, NULL);
337	return (dbn);
338}
339
340/*
341 * Set the fields in the zfs_bookmark_phys_t based on the specified snapshot.
342 */
343static void
344dsl_bookmark_set_phys(zfs_bookmark_phys_t *zbm, dsl_dataset_t *snap)
345{
346	spa_t *spa = dsl_dataset_get_spa(snap);
347	objset_t *mos = spa_get_dsl(spa)->dp_meta_objset;
348	dsl_dataset_phys_t *dsp = dsl_dataset_phys(snap);
349
350	memset(zbm, 0, sizeof (zfs_bookmark_phys_t));
351	zbm->zbm_guid = dsp->ds_guid;
352	zbm->zbm_creation_txg = dsp->ds_creation_txg;
353	zbm->zbm_creation_time = dsp->ds_creation_time;
354	zbm->zbm_redaction_obj = 0;
355
	/*
	 * If the dataset is encrypted, create a larger bookmark to
	 * accommodate the IVset guid.  The IVset guid was added
	 * after the encryption feature to prevent a problem with
	 * raw sends.  If we encounter an encrypted dataset without
	 * an IVset guid, we fall back to a normal bookmark.
	 */
363	if (snap->ds_dir->dd_crypto_obj != 0 &&
364	    spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
365		(void) zap_lookup(mos, snap->ds_object,
366		    DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
367		    &zbm->zbm_ivset_guid);
368	}
369
370	if (spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_WRITTEN)) {
371		zbm->zbm_flags = ZBM_FLAG_SNAPSHOT_EXISTS | ZBM_FLAG_HAS_FBN;
372		zbm->zbm_referenced_bytes_refd = dsp->ds_referenced_bytes;
373		zbm->zbm_compressed_bytes_refd = dsp->ds_compressed_bytes;
374		zbm->zbm_uncompressed_bytes_refd = dsp->ds_uncompressed_bytes;
375
376		dsl_dataset_t *nextds;
377		VERIFY0(dsl_dataset_hold_obj(snap->ds_dir->dd_pool,
378		    dsp->ds_next_snap_obj, FTAG, &nextds));
379		dsl_deadlist_space(&nextds->ds_deadlist,
380		    &zbm->zbm_referenced_freed_before_next_snap,
381		    &zbm->zbm_compressed_freed_before_next_snap,
382		    &zbm->zbm_uncompressed_freed_before_next_snap);
383		dsl_dataset_rele(nextds, FTAG);
384	}
385}
386
387/*
388 * Add dsl_bookmark_node_t `dbn` to the given dataset and increment appropriate
389 * SPA feature counters.
390 */
391void
392dsl_bookmark_node_add(dsl_dataset_t *hds, dsl_bookmark_node_t *dbn,
393    dmu_tx_t *tx)
394{
395	dsl_pool_t *dp = dmu_tx_pool(tx);
396	objset_t *mos = dp->dp_meta_objset;
397
398	if (hds->ds_bookmarks_obj == 0) {
399		hds->ds_bookmarks_obj = zap_create_norm(mos,
400		    U8_TEXTPREP_TOUPPER, DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0,
401		    tx);
402		spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
403
404		dsl_dataset_zapify(hds, tx);
405		VERIFY0(zap_add(mos, hds->ds_object,
406		    DS_FIELD_BOOKMARK_NAMES,
407		    sizeof (hds->ds_bookmarks_obj), 1,
408		    &hds->ds_bookmarks_obj, tx));
409	}
410
411	avl_add(&hds->ds_bookmarks, dbn);
412
413	/*
414	 * To maintain backwards compatibility with software that doesn't
415	 * understand SPA_FEATURE_BOOKMARK_V2, we need to use the smallest
416	 * possible bookmark size.
417	 */
418	uint64_t bookmark_phys_size = BOOKMARK_PHYS_SIZE_V1;
419	if (spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2) &&
420	    (dbn->dbn_phys.zbm_ivset_guid != 0 || dbn->dbn_phys.zbm_flags &
421	    ZBM_FLAG_HAS_FBN || dbn->dbn_phys.zbm_redaction_obj != 0)) {
422		bookmark_phys_size = BOOKMARK_PHYS_SIZE_V2;
423		spa_feature_incr(dp->dp_spa, SPA_FEATURE_BOOKMARK_V2, tx);
424	}
425
426	zfs_bookmark_phys_t zero_phys = { 0 };
427	ASSERT0(memcmp(((char *)&dbn->dbn_phys) + bookmark_phys_size,
428	    &zero_phys, sizeof (zfs_bookmark_phys_t) - bookmark_phys_size));
429
430	VERIFY0(zap_add(mos, hds->ds_bookmarks_obj, dbn->dbn_name,
431	    sizeof (uint64_t), bookmark_phys_size / sizeof (uint64_t),
432	    &dbn->dbn_phys, tx));
433}
434
435/*
436 * If redaction_list is non-null, we create a redacted bookmark and redaction
437 * list, and store the object number of the redaction list in redact_obj.
438 */
439static void
440dsl_bookmark_create_sync_impl_snap(const char *bookmark, const char *snapshot,
441    dmu_tx_t *tx, uint64_t num_redact_snaps, uint64_t *redact_snaps,
442    const void *tag, redaction_list_t **redaction_list)
443{
444	dsl_pool_t *dp = dmu_tx_pool(tx);
445	objset_t *mos = dp->dp_meta_objset;
446	dsl_dataset_t *snapds, *bmark_fs;
447	char *shortname;
448	boolean_t bookmark_redacted;
449	uint64_t *dsredactsnaps;
450	uint64_t dsnumsnaps;
451
452	VERIFY0(dsl_dataset_hold(dp, snapshot, FTAG, &snapds));
453	VERIFY0(dsl_bookmark_hold_ds(dp, bookmark, &bmark_fs, FTAG,
454	    &shortname));
455
456	dsl_bookmark_node_t *dbn = dsl_bookmark_node_alloc(shortname);
457	dsl_bookmark_set_phys(&dbn->dbn_phys, snapds);
458
459	bookmark_redacted = dsl_dataset_get_uint64_array_feature(snapds,
460	    SPA_FEATURE_REDACTED_DATASETS, &dsnumsnaps, &dsredactsnaps);
461	if (redaction_list != NULL || bookmark_redacted) {
462		redaction_list_t *local_rl;
463		boolean_t spill = B_FALSE;
464		if (bookmark_redacted) {
465			redact_snaps = dsredactsnaps;
466			num_redact_snaps = dsnumsnaps;
467		}
468		int bonuslen = sizeof (redaction_list_phys_t) +
469		    num_redact_snaps * sizeof (uint64_t);
470		if (bonuslen > dmu_bonus_max())
471			spill = B_TRUE;
472		dbn->dbn_phys.zbm_redaction_obj = dmu_object_alloc(mos,
473		    DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
474		    DMU_OTN_UINT64_METADATA, spill ? 0 : bonuslen, tx);
475		spa_feature_incr(dp->dp_spa,
476		    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
477		if (spill) {
478			spa_feature_incr(dp->dp_spa,
479			    SPA_FEATURE_REDACTION_LIST_SPILL, tx);
480		}
481
482		VERIFY0(dsl_redaction_list_hold_obj(dp,
483		    dbn->dbn_phys.zbm_redaction_obj, tag, &local_rl));
484		dsl_redaction_list_long_hold(dp, local_rl, tag);
485
486		if (!spill) {
487			ASSERT3U(local_rl->rl_bonus->db_size, >=, bonuslen);
488			dmu_buf_will_dirty(local_rl->rl_bonus, tx);
489		} else {
490			dmu_buf_t *db;
491			VERIFY0(dmu_spill_hold_by_bonus(local_rl->rl_bonus,
492			    DB_RF_MUST_SUCCEED, FTAG, &db));
493			dmu_buf_will_fill(db, tx, B_FALSE);
494			VERIFY0(dbuf_spill_set_blksz(db, P2ROUNDUP(bonuslen,
495			    SPA_MINBLOCKSIZE), tx));
496			local_rl->rl_phys = db->db_data;
497			local_rl->rl_dbuf = db;
498		}
499		memcpy(local_rl->rl_phys->rlp_snaps, redact_snaps,
500		    sizeof (uint64_t) * num_redact_snaps);
501		local_rl->rl_phys->rlp_num_snaps = num_redact_snaps;
502		if (bookmark_redacted) {
503			ASSERT3P(redaction_list, ==, NULL);
504			local_rl->rl_phys->rlp_last_blkid = UINT64_MAX;
505			local_rl->rl_phys->rlp_last_object = UINT64_MAX;
506			dsl_redaction_list_long_rele(local_rl, tag);
507			dsl_redaction_list_rele(local_rl, tag);
508		} else {
509			*redaction_list = local_rl;
510		}
511	}
512
513	if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
514		spa_feature_incr(dp->dp_spa,
515		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
516	}
517
518	dsl_bookmark_node_add(bmark_fs, dbn, tx);
519
520	spa_history_log_internal_ds(bmark_fs, "bookmark", tx,
521	    "name=%s creation_txg=%llu target_snap=%llu redact_obj=%llu",
522	    shortname, (longlong_t)dbn->dbn_phys.zbm_creation_txg,
523	    (longlong_t)snapds->ds_object,
524	    (longlong_t)dbn->dbn_phys.zbm_redaction_obj);
525
526	dsl_dataset_rele(bmark_fs, FTAG);
527	dsl_dataset_rele(snapds, FTAG);
528}
529
530
531static void
532dsl_bookmark_create_sync_impl_book(
533    const char *new_name, const char *source_name, dmu_tx_t *tx)
534{
535	dsl_pool_t *dp = dmu_tx_pool(tx);
536	dsl_dataset_t *bmark_fs_source, *bmark_fs_new;
537	char *source_shortname, *new_shortname;
538	zfs_bookmark_phys_t source_phys;
539
540	VERIFY0(dsl_bookmark_hold_ds(dp, source_name, &bmark_fs_source, FTAG,
541	    &source_shortname));
542	VERIFY0(dsl_bookmark_hold_ds(dp, new_name, &bmark_fs_new, FTAG,
543	    &new_shortname));
544
545	/*
546	 * create a copy of the source bookmark by copying most of its members
547	 *
548	 * Caveat: bookmarking a redaction bookmark yields a normal bookmark
549	 * -----------------------------------------------------------------
550	 * Reasoning:
551	 * - The zbm_redaction_obj would be referred to by both source and new
552	 *   bookmark, but would be destroyed once either source or new is
553	 *   destroyed, resulting in use-after-free of the referred object.
554	 * - User expectation when issuing the `zfs bookmark` command is that
555	 *   a normal bookmark of the source is created
556	 *
557	 * Design Alternatives For Full Redaction Bookmark Copying:
558	 * - reference-count the redaction object => would require on-disk
559	 *   format change for existing redaction objects
560	 * - Copy the redaction object => cannot be done in syncing context
561	 *   because the redaction object might be too large
562	 */
563
564	VERIFY0(dsl_bookmark_lookup_impl(bmark_fs_source, source_shortname,
565	    &source_phys));
566	dsl_bookmark_node_t *new_dbn = dsl_bookmark_node_alloc(new_shortname);
567
568	memcpy(&new_dbn->dbn_phys, &source_phys, sizeof (source_phys));
569	new_dbn->dbn_phys.zbm_redaction_obj = 0;
570
571	/* update feature counters */
572	if (new_dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
573		spa_feature_incr(dp->dp_spa,
574		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
575	}
576	/* no need for redaction bookmark counter; nulled zbm_redaction_obj */
577	/* dsl_bookmark_node_add bumps bookmarks and v2-bookmarks counter */
578
579	/*
580	 * write new bookmark
581	 *
582	 * Note that dsl_bookmark_lookup_impl guarantees that, if source is a
583	 * v1 bookmark, the v2-only fields are zeroed.
584	 * And dsl_bookmark_node_add writes back a v1-sized bookmark if
585	 * v2 bookmarks are disabled and/or v2-only fields are zeroed.
586	 * => bookmark copying works on pre-bookmark-v2 pools
587	 */
588	dsl_bookmark_node_add(bmark_fs_new, new_dbn, tx);
589
590	spa_history_log_internal_ds(bmark_fs_source, "bookmark", tx,
591	    "name=%s creation_txg=%llu source_guid=%llu",
592	    new_shortname, (longlong_t)new_dbn->dbn_phys.zbm_creation_txg,
593	    (longlong_t)source_phys.zbm_guid);
594
595	dsl_dataset_rele(bmark_fs_source, FTAG);
596	dsl_dataset_rele(bmark_fs_new, FTAG);
597}
598
599void
600dsl_bookmark_create_sync(void *arg, dmu_tx_t *tx)
601{
602	dsl_bookmark_create_arg_t *dbca = arg;
603
604	ASSERT(spa_feature_is_enabled(dmu_tx_pool(tx)->dp_spa,
605	    SPA_FEATURE_BOOKMARKS));
606
607	for (nvpair_t *pair = nvlist_next_nvpair(dbca->dbca_bmarks, NULL);
608	    pair != NULL; pair = nvlist_next_nvpair(dbca->dbca_bmarks, pair)) {
609
610		const char *new = nvpair_name(pair);
611		const char *source = fnvpair_value_string(pair);
612
613		if (strchr(source, '@') != NULL) {
614			dsl_bookmark_create_sync_impl_snap(new, source, tx,
615			    0, NULL, NULL, NULL);
616		} else if (strchr(source, '#') != NULL) {
617			dsl_bookmark_create_sync_impl_book(new, source, tx);
618		} else {
619			panic("unreachable code");
620		}
621
622	}
623}
624
625/*
626 * The bookmarks must all be in the same pool.
627 */
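/*
 * Usage sketch (illustrative names): to create "pool/fs#mybm" from
 * "pool/fs@snap1", a caller could do roughly the following:
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errors = fnvlist_alloc();
 *	fnvlist_add_string(bmarks, "pool/fs#mybm", "pool/fs@snap1");
 *	int err = dsl_bookmark_create(bmarks, errors);
 *	...
 *	fnvlist_free(errors);
 *	fnvlist_free(bmarks);
 *
 * On failure, per-bookmark errnos are added to the "errors" nvlist.
 */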
628int
629dsl_bookmark_create(nvlist_t *bmarks, nvlist_t *errors)
630{
631	nvpair_t *pair;
632	dsl_bookmark_create_arg_t dbca;
633
634	pair = nvlist_next_nvpair(bmarks, NULL);
635	if (pair == NULL)
636		return (0);
637
638	dbca.dbca_bmarks = bmarks;
639	dbca.dbca_errors = errors;
640
641	return (dsl_sync_task(nvpair_name(pair), dsl_bookmark_create_check,
642	    dsl_bookmark_create_sync, &dbca,
643	    fnvlist_num_pairs(bmarks), ZFS_SPACE_CHECK_NORMAL));
644}
645
646static int
647dsl_bookmark_create_redacted_check(void *arg, dmu_tx_t *tx)
648{
649	dsl_bookmark_create_redacted_arg_t *dbcra = arg;
650	dsl_pool_t *dp = dmu_tx_pool(tx);
651	int rv = 0;
652
653	if (!spa_feature_is_enabled(dp->dp_spa,
654	    SPA_FEATURE_REDACTION_BOOKMARKS))
655		return (SET_ERROR(ENOTSUP));
656	/*
657	 * If the list of redact snaps will not fit in the bonus buffer (or
658	 * spill block, with the REDACTION_LIST_SPILL feature) with the
659	 * furthest reached object and offset, fail.
660	 */
661	uint64_t snaplimit = ((spa_feature_is_enabled(dp->dp_spa,
662	    SPA_FEATURE_REDACTION_LIST_SPILL) ? spa_maxblocksize(dp->dp_spa) :
663	    dmu_bonus_max()) -
664	    sizeof (redaction_list_phys_t)) / sizeof (uint64_t);
665	if (dbcra->dbcra_numsnaps > snaplimit)
666		return (SET_ERROR(E2BIG));
667
668	if (dsl_bookmark_create_nvl_validate_pair(
669	    dbcra->dbcra_bmark, dbcra->dbcra_snap) != 0)
670		return (SET_ERROR(EINVAL));
671
672	rv = dsl_bookmark_create_check_impl(dp,
673	    dbcra->dbcra_bmark, dbcra->dbcra_snap);
674	return (rv);
675}
676
677static void
678dsl_bookmark_create_redacted_sync(void *arg, dmu_tx_t *tx)
679{
680	dsl_bookmark_create_redacted_arg_t *dbcra = arg;
681	dsl_bookmark_create_sync_impl_snap(dbcra->dbcra_bmark,
682	    dbcra->dbcra_snap, tx, dbcra->dbcra_numsnaps, dbcra->dbcra_snaps,
683	    dbcra->dbcra_tag, dbcra->dbcra_rl);
684}
685
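/*
 * Create a redaction bookmark "bookmark" of snapshot "snapshot", whose
 * redaction list records the "numsnaps" snapshot guids in "snapguids".
 * The new redaction list is returned in *rl with a long hold registered
 * under "tag"; the caller is responsible for releasing it when done.
 */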
686int
687dsl_bookmark_create_redacted(const char *bookmark, const char *snapshot,
688    uint64_t numsnaps, uint64_t *snapguids, const void *tag,
689    redaction_list_t **rl)
690{
691	dsl_bookmark_create_redacted_arg_t dbcra;
692
693	dbcra.dbcra_bmark = bookmark;
694	dbcra.dbcra_snap = snapshot;
695	dbcra.dbcra_rl = rl;
696	dbcra.dbcra_numsnaps = numsnaps;
697	dbcra.dbcra_snaps = snapguids;
698	dbcra.dbcra_tag = tag;
699
700	return (dsl_sync_task(bookmark, dsl_bookmark_create_redacted_check,
701	    dsl_bookmark_create_redacted_sync, &dbcra, 5,
702	    ZFS_SPACE_CHECK_NORMAL));
703}
704
705/*
706 * Retrieve the list of properties given in the 'props' nvlist for a bookmark.
707 * If 'props' is NULL, retrieves all properties.
708 */
709static void
710dsl_bookmark_fetch_props(dsl_pool_t *dp, zfs_bookmark_phys_t *bmark_phys,
711    nvlist_t *props, nvlist_t *out_props)
712{
713	ASSERT3P(dp, !=, NULL);
714	ASSERT3P(bmark_phys, !=, NULL);
715	ASSERT3P(out_props, !=, NULL);
716	ASSERT(RRW_LOCK_HELD(&dp->dp_config_rwlock));
717
718	if (props == NULL || nvlist_exists(props,
719	    zfs_prop_to_name(ZFS_PROP_GUID))) {
720		dsl_prop_nvlist_add_uint64(out_props,
721		    ZFS_PROP_GUID, bmark_phys->zbm_guid);
722	}
723	if (props == NULL || nvlist_exists(props,
724	    zfs_prop_to_name(ZFS_PROP_CREATETXG))) {
725		dsl_prop_nvlist_add_uint64(out_props,
726		    ZFS_PROP_CREATETXG, bmark_phys->zbm_creation_txg);
727	}
728	if (props == NULL || nvlist_exists(props,
729	    zfs_prop_to_name(ZFS_PROP_CREATION))) {
730		dsl_prop_nvlist_add_uint64(out_props,
731		    ZFS_PROP_CREATION, bmark_phys->zbm_creation_time);
732	}
733	if (props == NULL || nvlist_exists(props,
734	    zfs_prop_to_name(ZFS_PROP_IVSET_GUID))) {
735		dsl_prop_nvlist_add_uint64(out_props,
736		    ZFS_PROP_IVSET_GUID, bmark_phys->zbm_ivset_guid);
737	}
738	if (bmark_phys->zbm_flags & ZBM_FLAG_HAS_FBN) {
739		if (props == NULL || nvlist_exists(props,
740		    zfs_prop_to_name(ZFS_PROP_REFERENCED))) {
741			dsl_prop_nvlist_add_uint64(out_props,
742			    ZFS_PROP_REFERENCED,
743			    bmark_phys->zbm_referenced_bytes_refd);
744		}
745		if (props == NULL || nvlist_exists(props,
746		    zfs_prop_to_name(ZFS_PROP_LOGICALREFERENCED))) {
747			dsl_prop_nvlist_add_uint64(out_props,
748			    ZFS_PROP_LOGICALREFERENCED,
749			    bmark_phys->zbm_uncompressed_bytes_refd);
750		}
751		if (props == NULL || nvlist_exists(props,
752		    zfs_prop_to_name(ZFS_PROP_REFRATIO))) {
753			uint64_t ratio =
754			    bmark_phys->zbm_compressed_bytes_refd == 0 ? 100 :
755			    bmark_phys->zbm_uncompressed_bytes_refd * 100 /
756			    bmark_phys->zbm_compressed_bytes_refd;
757			dsl_prop_nvlist_add_uint64(out_props,
758			    ZFS_PROP_REFRATIO, ratio);
759		}
760	}
761
762	if ((props == NULL || nvlist_exists(props, "redact_snaps") ||
763	    nvlist_exists(props, "redact_complete")) &&
764	    bmark_phys->zbm_redaction_obj != 0) {
765		redaction_list_t *rl;
766		int err = dsl_redaction_list_hold_obj(dp,
767		    bmark_phys->zbm_redaction_obj, FTAG, &rl);
768		if (err == 0) {
769			if (nvlist_exists(props, "redact_snaps")) {
770				nvlist_t *nvl;
771				nvl = fnvlist_alloc();
772				fnvlist_add_uint64_array(nvl, ZPROP_VALUE,
773				    rl->rl_phys->rlp_snaps,
774				    rl->rl_phys->rlp_num_snaps);
775				fnvlist_add_nvlist(out_props, "redact_snaps",
776				    nvl);
777				nvlist_free(nvl);
778			}
779			if (nvlist_exists(props, "redact_complete")) {
780				nvlist_t *nvl;
781				nvl = fnvlist_alloc();
782				fnvlist_add_boolean_value(nvl, ZPROP_VALUE,
783				    rl->rl_phys->rlp_last_blkid == UINT64_MAX &&
784				    rl->rl_phys->rlp_last_object == UINT64_MAX);
785				fnvlist_add_nvlist(out_props, "redact_complete",
786				    nvl);
787				nvlist_free(nvl);
788			}
789			dsl_redaction_list_rele(rl, FTAG);
790		}
791	}
792}
793
794int
795dsl_get_bookmarks_impl(dsl_dataset_t *ds, nvlist_t *props, nvlist_t *outnvl)
796{
797	dsl_pool_t *dp = ds->ds_dir->dd_pool;
798
799	ASSERT(dsl_pool_config_held(dp));
800
801	if (dsl_dataset_is_snapshot(ds))
802		return (SET_ERROR(EINVAL));
803
804	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
805	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
806		nvlist_t *out_props = fnvlist_alloc();
807
808		dsl_bookmark_fetch_props(dp, &dbn->dbn_phys, props, out_props);
809
810		fnvlist_add_nvlist(outnvl, dbn->dbn_name, out_props);
811		fnvlist_free(out_props);
812	}
813	return (0);
814}
815
816/*
817 * Comparison func for ds_bookmarks AVL tree.  We sort the bookmarks by
818 * their TXG, then by their FBN-ness.  The "FBN-ness" component ensures
819 * that all bookmarks at the same TXG that HAS_FBN are adjacent, which
820 * dsl_bookmark_destroy_sync_impl() depends on.  Note that there may be
821 * multiple bookmarks at the same TXG (with the same FBN-ness).  In this
822 * case we differentiate them by an arbitrary metric (in this case,
823 * their names).
824 */
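/*
 * For example (illustrative values): nodes with (txg 100, !HAS_FBN),
 * (txg 100, HAS_FBN, name "a") and (txg 100, HAS_FBN, name "b") sort in
 * that order, and a node with txg 99 sorts before all three.
 */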
825static int
826dsl_bookmark_compare(const void *l, const void *r)
827{
828	const dsl_bookmark_node_t *ldbn = l;
829	const dsl_bookmark_node_t *rdbn = r;
830
831	int64_t cmp = TREE_CMP(ldbn->dbn_phys.zbm_creation_txg,
832	    rdbn->dbn_phys.zbm_creation_txg);
833	if (likely(cmp))
834		return (cmp);
835	cmp = TREE_CMP((ldbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN),
836	    (rdbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
837	if (likely(cmp))
838		return (cmp);
839	cmp = strcmp(ldbn->dbn_name, rdbn->dbn_name);
840	return (TREE_ISIGN(cmp));
841}
842
843/*
844 * Cache this (head) dataset's bookmarks in the ds_bookmarks AVL tree.
845 */
846int
847dsl_bookmark_init_ds(dsl_dataset_t *ds)
848{
849	dsl_pool_t *dp = ds->ds_dir->dd_pool;
850	objset_t *mos = dp->dp_meta_objset;
851
852	ASSERT(!ds->ds_is_snapshot);
853
854	avl_create(&ds->ds_bookmarks, dsl_bookmark_compare,
855	    sizeof (dsl_bookmark_node_t),
856	    offsetof(dsl_bookmark_node_t, dbn_node));
857
858	if (!dsl_dataset_is_zapified(ds))
859		return (0);
860
861	int zaperr = zap_lookup(mos, ds->ds_object, DS_FIELD_BOOKMARK_NAMES,
862	    sizeof (ds->ds_bookmarks_obj), 1, &ds->ds_bookmarks_obj);
863	if (zaperr == ENOENT)
864		return (0);
865	if (zaperr != 0)
866		return (zaperr);
867
868	if (ds->ds_bookmarks_obj == 0)
869		return (0);
870
871	int err = 0;
872	zap_cursor_t zc;
873	zap_attribute_t attr;
874
875	for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
876	    (err = zap_cursor_retrieve(&zc, &attr)) == 0;
877	    zap_cursor_advance(&zc)) {
878		dsl_bookmark_node_t *dbn =
879		    dsl_bookmark_node_alloc(attr.za_name);
880
881		err = dsl_bookmark_lookup_impl(ds,
882		    dbn->dbn_name, &dbn->dbn_phys);
883		ASSERT3U(err, !=, ENOENT);
884		if (err != 0) {
885			kmem_free(dbn, sizeof (*dbn));
886			break;
887		}
888		avl_add(&ds->ds_bookmarks, dbn);
889	}
890	zap_cursor_fini(&zc);
891	if (err == ENOENT)
892		err = 0;
893	return (err);
894}
895
896void
897dsl_bookmark_fini_ds(dsl_dataset_t *ds)
898{
899	void *cookie = NULL;
900	dsl_bookmark_node_t *dbn;
901
902	if (ds->ds_is_snapshot)
903		return;
904
905	while ((dbn = avl_destroy_nodes(&ds->ds_bookmarks, &cookie)) != NULL) {
906		spa_strfree(dbn->dbn_name);
907		mutex_destroy(&dbn->dbn_lock);
908		kmem_free(dbn, sizeof (*dbn));
909	}
910	avl_destroy(&ds->ds_bookmarks);
911}
912
913/*
914 * Retrieve the bookmarks that exist in the specified dataset, and the
915 * requested properties of each bookmark.
916 *
917 * The "props" nvlist specifies which properties are requested.
918 * See lzc_get_bookmarks() for the list of valid properties.
919 */
920int
921dsl_get_bookmarks(const char *dsname, nvlist_t *props, nvlist_t *outnvl)
922{
923	dsl_pool_t *dp;
924	dsl_dataset_t *ds;
925	int err;
926
927	err = dsl_pool_hold(dsname, FTAG, &dp);
928	if (err != 0)
929		return (err);
930	err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
931	if (err != 0) {
932		dsl_pool_rele(dp, FTAG);
933		return (err);
934	}
935
936	err = dsl_get_bookmarks_impl(ds, props, outnvl);
937
938	dsl_dataset_rele(ds, FTAG);
939	dsl_pool_rele(dp, FTAG);
940	return (err);
941}
942
943/*
944 * Retrieve all properties for a single bookmark in the given dataset.
945 */
946int
947dsl_get_bookmark_props(const char *dsname, const char *bmname, nvlist_t *props)
948{
949	dsl_pool_t *dp;
950	dsl_dataset_t *ds;
951	zfs_bookmark_phys_t bmark_phys = { 0 };
952	int err;
953
954	err = dsl_pool_hold(dsname, FTAG, &dp);
955	if (err != 0)
956		return (err);
957	err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
958	if (err != 0) {
959		dsl_pool_rele(dp, FTAG);
960		return (err);
961	}
962
963	err = dsl_bookmark_lookup_impl(ds, bmname, &bmark_phys);
964	if (err != 0)
965		goto out;
966
967	dsl_bookmark_fetch_props(dp, &bmark_phys, NULL, props);
968out:
969	dsl_dataset_rele(ds, FTAG);
970	dsl_pool_rele(dp, FTAG);
971	return (err);
972}
973
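/*
 * In-core arguments for dsl_bookmark_destroy():
 *   dbda_bmarks  - all bookmarks requested for destruction
 *   dbda_success - bookmarks that passed the check and will be destroyed
 *   dbda_errors  - per-bookmark errors encountered during the check
 */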
974typedef struct dsl_bookmark_destroy_arg {
975	nvlist_t *dbda_bmarks;
976	nvlist_t *dbda_success;
977	nvlist_t *dbda_errors;
978} dsl_bookmark_destroy_arg_t;
979
980static void
981dsl_bookmark_destroy_sync_impl(dsl_dataset_t *ds, const char *name,
982    dmu_tx_t *tx)
983{
984	objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
985	uint64_t bmark_zapobj = ds->ds_bookmarks_obj;
986	matchtype_t mt = 0;
987	uint64_t int_size, num_ints;
	/*
	 * 'search' must be zeroed so that dbn_phys.zbm_flags (which is used
	 * in dsl_bookmark_compare()) will be zeroed even if the on-disk
	 * (in ZAP) bookmark is in an older, shorter format that does not
	 * include the flags field.
	 */
993	dsl_bookmark_node_t search = { 0 };
994	char realname[ZFS_MAX_DATASET_NAME_LEN];
995
996	/*
997	 * Find the real name of this bookmark, which may be different
998	 * from the given name if the dataset is case-insensitive.  Then
999	 * use the real name to find the node in the ds_bookmarks AVL tree.
1000	 */
1001
1002	if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
1003		mt = MT_NORMALIZE;
1004
1005	VERIFY0(zap_length(mos, bmark_zapobj, name, &int_size, &num_ints));
1006
1007	ASSERT3U(int_size, ==, sizeof (uint64_t));
1008
1009	if (num_ints * int_size > BOOKMARK_PHYS_SIZE_V1) {
1010		spa_feature_decr(dmu_objset_spa(mos),
1011		    SPA_FEATURE_BOOKMARK_V2, tx);
1012	}
1013	VERIFY0(zap_lookup_norm(mos, bmark_zapobj, name, sizeof (uint64_t),
1014	    num_ints, &search.dbn_phys, mt, realname, sizeof (realname), NULL));
1015
1016	search.dbn_name = realname;
1017	dsl_bookmark_node_t *dbn = avl_find(&ds->ds_bookmarks, &search, NULL);
1018	ASSERT(dbn != NULL);
1019
1020	if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) {
1021		/*
1022		 * If this bookmark HAS_FBN, and it is before the most
1023		 * recent snapshot, then its TXG is a key in the head's
1024		 * deadlist (and all clones' heads' deadlists).  If this is
1025		 * the last thing keeping the key (i.e. there are no more
1026		 * bookmarks with HAS_FBN at this TXG, and there is no
1027		 * snapshot at this TXG), then remove the key.
1028		 *
1029		 * Note that this algorithm depends on ds_bookmarks being
1030		 * sorted such that all bookmarks at the same TXG with
1031		 * HAS_FBN are adjacent (with no non-HAS_FBN bookmarks
1032		 * at the same TXG in between them).  If this were not
1033		 * the case, we would need to examine *all* bookmarks
1034		 * at this TXG, rather than just the adjacent ones.
1035		 */
1036
1037		dsl_bookmark_node_t *dbn_prev =
1038		    AVL_PREV(&ds->ds_bookmarks, dbn);
1039		dsl_bookmark_node_t *dbn_next =
1040		    AVL_NEXT(&ds->ds_bookmarks, dbn);
1041
1042		boolean_t more_bookmarks_at_this_txg =
1043		    (dbn_prev != NULL && dbn_prev->dbn_phys.zbm_creation_txg ==
1044		    dbn->dbn_phys.zbm_creation_txg &&
1045		    (dbn_prev->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) ||
1046		    (dbn_next != NULL && dbn_next->dbn_phys.zbm_creation_txg ==
1047		    dbn->dbn_phys.zbm_creation_txg &&
1048		    (dbn_next->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN));
1049
1050		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS) &&
1051		    !more_bookmarks_at_this_txg &&
1052		    dbn->dbn_phys.zbm_creation_txg <
1053		    dsl_dataset_phys(ds)->ds_prev_snap_txg) {
1054			dsl_dir_remove_clones_key(ds->ds_dir,
1055			    dbn->dbn_phys.zbm_creation_txg, tx);
1056			dsl_deadlist_remove_key(&ds->ds_deadlist,
1057			    dbn->dbn_phys.zbm_creation_txg, tx);
1058		}
1059
1060		spa_feature_decr(dmu_objset_spa(mos),
1061		    SPA_FEATURE_BOOKMARK_WRITTEN, tx);
1062	}
1063
1064	if (dbn->dbn_phys.zbm_redaction_obj != 0) {
1065		dnode_t *rl;
1066		VERIFY0(dnode_hold(mos,
1067		    dbn->dbn_phys.zbm_redaction_obj, FTAG, &rl));
1068		if (rl->dn_have_spill) {
1069			spa_feature_decr(dmu_objset_spa(mos),
1070			    SPA_FEATURE_REDACTION_LIST_SPILL, tx);
1071		}
1072		dnode_rele(rl, FTAG);
1073		VERIFY0(dmu_object_free(mos,
1074		    dbn->dbn_phys.zbm_redaction_obj, tx));
1075		spa_feature_decr(dmu_objset_spa(mos),
1076		    SPA_FEATURE_REDACTION_BOOKMARKS, tx);
1077	}
1078
1079	avl_remove(&ds->ds_bookmarks, dbn);
1080	spa_strfree(dbn->dbn_name);
1081	mutex_destroy(&dbn->dbn_lock);
1082	kmem_free(dbn, sizeof (*dbn));
1083
1084	VERIFY0(zap_remove_norm(mos, bmark_zapobj, name, mt, tx));
1085}
1086
1087static int
1088dsl_bookmark_destroy_check(void *arg, dmu_tx_t *tx)
1089{
1090	dsl_bookmark_destroy_arg_t *dbda = arg;
1091	dsl_pool_t *dp = dmu_tx_pool(tx);
1092	int rv = 0;
1093
1094	ASSERT(nvlist_empty(dbda->dbda_success));
1095	ASSERT(nvlist_empty(dbda->dbda_errors));
1096
1097	if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_BOOKMARKS))
1098		return (0);
1099
1100	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_bmarks, NULL);
1101	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_bmarks, pair)) {
1102		const char *fullname = nvpair_name(pair);
1103		dsl_dataset_t *ds;
1104		zfs_bookmark_phys_t bm;
1105		int error;
1106		char *shortname;
1107
1108		error = dsl_bookmark_hold_ds(dp, fullname, &ds,
1109		    FTAG, &shortname);
1110		if (error == ENOENT) {
1111			/* ignore it; the bookmark is "already destroyed" */
1112			continue;
1113		}
1114		if (error == 0) {
1115			error = dsl_bookmark_lookup_impl(ds, shortname, &bm);
1116			dsl_dataset_rele(ds, FTAG);
1117			if (error == ESRCH) {
1118				/*
1119				 * ignore it; the bookmark is
1120				 * "already destroyed"
1121				 */
1122				continue;
1123			}
1124			if (error == 0 && bm.zbm_redaction_obj != 0) {
1125				redaction_list_t *rl = NULL;
1126				error = dsl_redaction_list_hold_obj(tx->tx_pool,
1127				    bm.zbm_redaction_obj, FTAG, &rl);
1128				if (error == ENOENT) {
1129					error = 0;
1130				} else if (error == 0 &&
1131				    dsl_redaction_list_long_held(rl)) {
1132					error = SET_ERROR(EBUSY);
1133				}
1134				if (rl != NULL) {
1135					dsl_redaction_list_rele(rl, FTAG);
1136				}
1137			}
1138		}
1139		if (error == 0) {
1140			if (dmu_tx_is_syncing(tx)) {
1141				fnvlist_add_boolean(dbda->dbda_success,
1142				    fullname);
1143			}
1144		} else {
1145			fnvlist_add_int32(dbda->dbda_errors, fullname, error);
1146			rv = error;
1147		}
1148	}
1149	return (rv);
1150}
1151
1152static void
1153dsl_bookmark_destroy_sync(void *arg, dmu_tx_t *tx)
1154{
1155	dsl_bookmark_destroy_arg_t *dbda = arg;
1156	dsl_pool_t *dp = dmu_tx_pool(tx);
1157	objset_t *mos = dp->dp_meta_objset;
1158
1159	for (nvpair_t *pair = nvlist_next_nvpair(dbda->dbda_success, NULL);
1160	    pair != NULL; pair = nvlist_next_nvpair(dbda->dbda_success, pair)) {
1161		dsl_dataset_t *ds;
1162		char *shortname;
1163		uint64_t zap_cnt;
1164
1165		VERIFY0(dsl_bookmark_hold_ds(dp, nvpair_name(pair),
1166		    &ds, FTAG, &shortname));
1167		dsl_bookmark_destroy_sync_impl(ds, shortname, tx);
1168
1169		/*
1170		 * If all of this dataset's bookmarks have been destroyed,
1171		 * free the zap object and decrement the feature's use count.
1172		 */
1173		VERIFY0(zap_count(mos, ds->ds_bookmarks_obj, &zap_cnt));
1174		if (zap_cnt == 0) {
1175			dmu_buf_will_dirty(ds->ds_dbuf, tx);
1176			VERIFY0(zap_destroy(mos, ds->ds_bookmarks_obj, tx));
1177			ds->ds_bookmarks_obj = 0;
1178			spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
1179			VERIFY0(zap_remove(mos, ds->ds_object,
1180			    DS_FIELD_BOOKMARK_NAMES, tx));
1181		}
1182
1183		spa_history_log_internal_ds(ds, "remove bookmark", tx,
1184		    "name=%s", shortname);
1185
1186		dsl_dataset_rele(ds, FTAG);
1187	}
1188}
1189
1190/*
1191 * The bookmarks must all be in the same pool.
1192 */
1193int
1194dsl_bookmark_destroy(nvlist_t *bmarks, nvlist_t *errors)
1195{
1196	int rv;
1197	dsl_bookmark_destroy_arg_t dbda;
1198	nvpair_t *pair = nvlist_next_nvpair(bmarks, NULL);
1199	if (pair == NULL)
1200		return (0);
1201
1202	dbda.dbda_bmarks = bmarks;
1203	dbda.dbda_errors = errors;
1204	dbda.dbda_success = fnvlist_alloc();
1205
1206	rv = dsl_sync_task(nvpair_name(pair), dsl_bookmark_destroy_check,
1207	    dsl_bookmark_destroy_sync, &dbda, fnvlist_num_pairs(bmarks),
1208	    ZFS_SPACE_CHECK_RESERVED);
1209	fnvlist_free(dbda.dbda_success);
1210	return (rv);
1211}
1212
/* Return B_TRUE if there are any long holds on this redaction list. */
1214boolean_t
1215dsl_redaction_list_long_held(redaction_list_t *rl)
1216{
1217	return (!zfs_refcount_is_zero(&rl->rl_longholds));
1218}
1219
1220void
1221dsl_redaction_list_long_hold(dsl_pool_t *dp, redaction_list_t *rl,
1222    const void *tag)
1223{
1224	ASSERT(dsl_pool_config_held(dp));
1225	(void) zfs_refcount_add(&rl->rl_longholds, tag);
1226}
1227
1228void
1229dsl_redaction_list_long_rele(redaction_list_t *rl, const void *tag)
1230{
1231	(void) zfs_refcount_remove(&rl->rl_longholds, tag);
1232}
1233
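/*
 * dbuf user eviction callback: frees the in-core redaction_list_t once the
 * bonus buffer it is attached to is evicted.
 */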
1234static void
1235redaction_list_evict_sync(void *rlu)
1236{
1237	redaction_list_t *rl = rlu;
1238	zfs_refcount_destroy(&rl->rl_longholds);
1239
1240	kmem_free(rl, sizeof (redaction_list_t));
1241}
1242
1243void
1244dsl_redaction_list_rele(redaction_list_t *rl, const void *tag)
1245{
1246	if (rl->rl_bonus != rl->rl_dbuf)
1247		dmu_buf_rele(rl->rl_dbuf, tag);
1248	dmu_buf_rele(rl->rl_bonus, tag);
1249}
1250
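/*
 * Hold the redaction list with object number "rlobj" and return it in *rlp.
 * A redaction_list_t is attached to the object's bonus dbuf as a dbuf user
 * the first time the object is held; concurrent callers share the structure
 * created by whoever wins the dmu_buf_set_user_ie() race.
 */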
1251int
1252dsl_redaction_list_hold_obj(dsl_pool_t *dp, uint64_t rlobj, const void *tag,
1253    redaction_list_t **rlp)
1254{
1255	objset_t *mos = dp->dp_meta_objset;
1256	dmu_buf_t *dbuf, *spill_dbuf;
1257	redaction_list_t *rl;
1258	int err;
1259
1260	ASSERT(dsl_pool_config_held(dp));
1261
1262	err = dmu_bonus_hold(mos, rlobj, tag, &dbuf);
1263	if (err != 0)
1264		return (err);
1265
1266	rl = dmu_buf_get_user(dbuf);
1267	if (rl == NULL) {
1268		redaction_list_t *winner = NULL;
1269
1270		rl = kmem_zalloc(sizeof (redaction_list_t), KM_SLEEP);
1271		rl->rl_bonus = dbuf;
1272		if (dmu_spill_hold_existing(dbuf, tag, &spill_dbuf) == 0) {
1273			rl->rl_dbuf = spill_dbuf;
1274		} else {
1275			rl->rl_dbuf = dbuf;
1276		}
1277		rl->rl_object = rlobj;
1278		rl->rl_phys = rl->rl_dbuf->db_data;
1279		rl->rl_mos = dp->dp_meta_objset;
1280		zfs_refcount_create(&rl->rl_longholds);
1281		dmu_buf_init_user(&rl->rl_dbu, redaction_list_evict_sync, NULL,
1282		    &rl->rl_bonus);
1283		if ((winner = dmu_buf_set_user_ie(dbuf, &rl->rl_dbu)) != NULL) {
1284			kmem_free(rl, sizeof (*rl));
1285			rl = winner;
1286		}
1287	}
1288	*rlp = rl;
1289	return (0);
1290}
1291
1292/*
1293 * Snapshot ds is being destroyed.
1294 *
1295 * Adjust the "freed_before_next" of any bookmarks between this snap
1296 * and the previous snapshot, because their "next snapshot" is changing.
1297 *
 * If there are any bookmarks with HAS_FBN at this snapshot, remove
 * their SNAPSHOT_EXISTS flag (note: there can be at most one snapshot of
 * each filesystem at a given txg), and return B_TRUE.  In this case
 * the caller cannot remove the key in the deadlist at this TXG, because
 * the HAS_FBN bookmarks require the key to be there.
1303 *
1304 * Returns B_FALSE if there are no bookmarks with HAS_FBN at this
1305 * snapshot's TXG.  In this case the caller can remove the key in the
1306 * deadlist at this TXG.
1307 */
1308boolean_t
1309dsl_bookmark_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
1310{
1311	dsl_pool_t *dp = ds->ds_dir->dd_pool;
1312
1313	dsl_dataset_t *head, *next;
1314	VERIFY0(dsl_dataset_hold_obj(dp,
1315	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &head));
1316	VERIFY0(dsl_dataset_hold_obj(dp,
1317	    dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &next));
1318
1319	/*
1320	 * Find the first bookmark that HAS_FBN at or after the
1321	 * previous snapshot.
1322	 */
1323	dsl_bookmark_node_t search = { 0 };
1324	avl_index_t idx;
1325	search.dbn_phys.zbm_creation_txg =
1326	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1327	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1328	/*
1329	 * The empty-string name can't be in the AVL, and it compares
1330	 * before any entries with this TXG.
1331	 */
1332	search.dbn_name = (char *)"";
1333	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1334	dsl_bookmark_node_t *dbn =
1335	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1336
1337	/*
1338	 * Iterate over all bookmarks that are at or after the previous
1339	 * snapshot, and before this (being deleted) snapshot.  Adjust
1340	 * their FBN based on their new next snapshot.
1341	 */
1342	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg <
1343	    dsl_dataset_phys(ds)->ds_creation_txg;
1344	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1345		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN))
1346			continue;
		/*
		 * Increase our FBN by the amount of space that was live
		 * (referenced) at the time of this bookmark (i.e.
		 * birth <= zbm_creation_txg), and killed between this
		 * (being deleted) snapshot and the next snapshot (i.e.
		 * on the next snapshot's deadlist).  (Space killed before
		 * this snapshot is already reflected in our FBN.)
		 */
1355		uint64_t referenced, compressed, uncompressed;
1356		dsl_deadlist_space_range(&next->ds_deadlist,
1357		    0, dbn->dbn_phys.zbm_creation_txg,
1358		    &referenced, &compressed, &uncompressed);
1359		dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1360		    referenced;
1361		dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1362		    compressed;
1363		dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1364		    uncompressed;
1365		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1366		    dbn->dbn_name, sizeof (uint64_t),
1367		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1368		    &dbn->dbn_phys, tx));
1369	}
1370	dsl_dataset_rele(next, FTAG);
1371
1372	/*
1373	 * There may be several bookmarks at this txg (the TXG of the
1374	 * snapshot being deleted).  We need to clear the SNAPSHOT_EXISTS
1375	 * flag on all of them, and return TRUE if there is at least 1
1376	 * bookmark here with HAS_FBN (thus preventing the deadlist
1377	 * key from being removed).
1378	 */
1379	boolean_t rv = B_FALSE;
1380	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1381	    dsl_dataset_phys(ds)->ds_creation_txg;
1382	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1383		if (!(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1384			ASSERT(!(dbn->dbn_phys.zbm_flags &
1385			    ZBM_FLAG_SNAPSHOT_EXISTS));
1386			continue;
1387		}
1388		ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_SNAPSHOT_EXISTS);
1389		dbn->dbn_phys.zbm_flags &= ~ZBM_FLAG_SNAPSHOT_EXISTS;
1390		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1391		    dbn->dbn_name, sizeof (uint64_t),
1392		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1393		    &dbn->dbn_phys, tx));
1394		rv = B_TRUE;
1395	}
1396	dsl_dataset_rele(head, FTAG);
1397	return (rv);
1398}
1399
1400/*
1401 * A snapshot is being created of this (head) dataset.
1402 *
1403 * We don't keep keys in the deadlist for the most recent snapshot, or any
1404 * bookmarks at or after it, because there can't be any blocks on the
1405 * deadlist in this range.  Now that the most recent snapshot is after
1406 * all bookmarks, we need to add these keys.  Note that the caller always
1407 * adds a key at the previous snapshot, so we only add keys for bookmarks
1408 * after that.
1409 */
1410void
1411dsl_bookmark_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
1412{
1413	uint64_t last_key_added = UINT64_MAX;
1414	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1415	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >
1416	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1417	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1418		uint64_t creation_txg = dbn->dbn_phys.zbm_creation_txg;
1419		ASSERT3U(creation_txg, <=, last_key_added);
1420		/*
1421		 * Note, there may be multiple bookmarks at this TXG,
1422		 * and we only want to add the key for this TXG once.
1423		 * The ds_bookmarks AVL is sorted by TXG, so we will visit
1424		 * these bookmarks in sequence.
1425		 */
1426		if ((dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN) &&
1427		    creation_txg != last_key_added) {
1428			dsl_deadlist_add_key(&ds->ds_deadlist,
1429			    creation_txg, tx);
1430			last_key_added = creation_txg;
1431		}
1432	}
1433}
1434
1435/*
1436 * The next snapshot of the origin dataset has changed, due to
1437 * promote or clone swap.  If there are any bookmarks at this dataset,
1438 * we need to update their zbm_*_freed_before_next_snap to reflect this.
1439 * The head dataset has the relevant bookmarks in ds_bookmarks.
1440 */
1441void
1442dsl_bookmark_next_changed(dsl_dataset_t *head, dsl_dataset_t *origin,
1443    dmu_tx_t *tx)
1444{
1445	dsl_pool_t *dp = dmu_tx_pool(tx);
1446
1447	/*
1448	 * Find the first bookmark that HAS_FBN at the origin snapshot.
1449	 */
1450	dsl_bookmark_node_t search = { 0 };
1451	avl_index_t idx;
1452	search.dbn_phys.zbm_creation_txg =
1453	    dsl_dataset_phys(origin)->ds_creation_txg;
1454	search.dbn_phys.zbm_flags = ZBM_FLAG_HAS_FBN;
1455	/*
1456	 * The empty-string name can't be in the AVL, and it compares
1457	 * before any entries with this TXG.
1458	 */
1459	search.dbn_name = (char *)"";
1460	VERIFY3P(avl_find(&head->ds_bookmarks, &search, &idx), ==, NULL);
1461	dsl_bookmark_node_t *dbn =
1462	    avl_nearest(&head->ds_bookmarks, idx, AVL_AFTER);
1463
1464	/*
1465	 * Iterate over all bookmarks that are at the origin txg.
1466	 * Adjust their FBN based on their new next snapshot.
1467	 */
1468	for (; dbn != NULL && dbn->dbn_phys.zbm_creation_txg ==
1469	    dsl_dataset_phys(origin)->ds_creation_txg &&
1470	    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1471	    dbn = AVL_NEXT(&head->ds_bookmarks, dbn)) {
1472
1473		/*
1474		 * Bookmark is at the origin, therefore its
1475		 * "next dataset" is changing, so we need
1476		 * to reset its FBN by recomputing it in
1477		 * dsl_bookmark_set_phys().
1478		 */
1479		ASSERT3U(dbn->dbn_phys.zbm_guid, ==,
1480		    dsl_dataset_phys(origin)->ds_guid);
1481		ASSERT3U(dbn->dbn_phys.zbm_referenced_bytes_refd, ==,
1482		    dsl_dataset_phys(origin)->ds_referenced_bytes);
1483		ASSERT(dbn->dbn_phys.zbm_flags &
1484		    ZBM_FLAG_SNAPSHOT_EXISTS);
1485		/*
1486		 * Save and restore the zbm_redaction_obj, which
1487		 * is zeroed by dsl_bookmark_set_phys().
1488		 */
1489		uint64_t redaction_obj =
1490		    dbn->dbn_phys.zbm_redaction_obj;
1491		dsl_bookmark_set_phys(&dbn->dbn_phys, origin);
1492		dbn->dbn_phys.zbm_redaction_obj = redaction_obj;
1493
1494		VERIFY0(zap_update(dp->dp_meta_objset, head->ds_bookmarks_obj,
1495		    dbn->dbn_name, sizeof (uint64_t),
1496		    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1497		    &dbn->dbn_phys, tx));
1498	}
1499}
1500
1501/*
1502 * This block is no longer referenced by this (head) dataset.
1503 *
1504 * Adjust the FBN of any bookmarks that reference this block, whose "next"
1505 * is the head dataset.
1506 */
1507void
1508dsl_bookmark_block_killed(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
1509{
1510	(void) tx;
1511
1512	/*
1513	 * Iterate over bookmarks whose "next" is the head dataset.
1514	 */
1515	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1516	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1517	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1518	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1519		/*
1520		 * If the block was live (referenced) at the time of this
1521		 * bookmark, add its space to the bookmark's FBN.
1522		 */
1523		if (BP_GET_LOGICAL_BIRTH(bp) <=
1524		    dbn->dbn_phys.zbm_creation_txg &&
1525		    (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)) {
1526			mutex_enter(&dbn->dbn_lock);
1527			dbn->dbn_phys.zbm_referenced_freed_before_next_snap +=
1528			    bp_get_dsize_sync(dsl_dataset_get_spa(ds), bp);
1529			dbn->dbn_phys.zbm_compressed_freed_before_next_snap +=
1530			    BP_GET_PSIZE(bp);
1531			dbn->dbn_phys.zbm_uncompressed_freed_before_next_snap +=
1532			    BP_GET_UCSIZE(bp);
1533			/*
1534			 * Changing the ZAP object here would be too
1535			 * expensive.  Also, we may be called from the zio
1536			 * interrupt thread, which can't block on i/o.
1537			 * Therefore, we mark this bookmark as dirty and
1538			 * modify the ZAP once per txg, in
1539			 * dsl_bookmark_sync_done().
1540			 */
1541			dbn->dbn_dirty = B_TRUE;
1542			mutex_exit(&dbn->dbn_lock);
1543		}
1544	}
1545}
1546
1547void
1548dsl_bookmark_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
1549{
1550	dsl_pool_t *dp = dmu_tx_pool(tx);
1551
1552	if (dsl_dataset_is_snapshot(ds))
1553		return;
1554
1555	/*
1556	 * We only dirty bookmarks that are at or after the most recent
1557	 * snapshot.  We can't create snapshots between
1558	 * dsl_bookmark_block_killed() and dsl_bookmark_sync_done(), so we
1559	 * don't need to look at any bookmarks before ds_prev_snap_txg.
1560	 */
1561	for (dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1562	    dbn != NULL && dbn->dbn_phys.zbm_creation_txg >=
1563	    dsl_dataset_phys(ds)->ds_prev_snap_txg;
1564	    dbn = AVL_PREV(&ds->ds_bookmarks, dbn)) {
1565		if (dbn->dbn_dirty) {
1566			/*
1567			 * We only dirty nodes with HAS_FBN, therefore
1568			 * we can always use the current bookmark struct size.
1569			 */
1570			ASSERT(dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN);
1571			VERIFY0(zap_update(dp->dp_meta_objset,
1572			    ds->ds_bookmarks_obj,
1573			    dbn->dbn_name, sizeof (uint64_t),
1574			    sizeof (zfs_bookmark_phys_t) / sizeof (uint64_t),
1575			    &dbn->dbn_phys, tx));
1576			dbn->dbn_dirty = B_FALSE;
1577		}
1578	}
1579#ifdef ZFS_DEBUG
1580	for (dsl_bookmark_node_t *dbn = avl_first(&ds->ds_bookmarks);
1581	    dbn != NULL; dbn = AVL_NEXT(&ds->ds_bookmarks, dbn)) {
1582		ASSERT(!dbn->dbn_dirty);
1583	}
1584#endif
1585}
1586
1587/*
1588 * Return the TXG of the most recent bookmark (or 0 if there are no bookmarks).
1589 */
1590uint64_t
1591dsl_bookmark_latest_txg(dsl_dataset_t *ds)
1592{
1593	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
1594	dsl_bookmark_node_t *dbn = avl_last(&ds->ds_bookmarks);
1595	if (dbn == NULL)
1596		return (0);
1597	return (dbn->dbn_phys.zbm_creation_txg);
1598}
1599
1600/*
1601 * Compare the redact_block_phys_t to the bookmark. If the last block in the
1602 * redact_block_phys_t is before the bookmark, return -1.  If the first block in
1603 * the redact_block_phys_t is after the bookmark, return 1.  Otherwise, the
1604 * bookmark is inside the range of the redact_block_phys_t, and we return 0.
1605 */
1606static int
1607redact_block_zb_compare(redact_block_phys_t *first,
1608    zbookmark_phys_t *second)
1609{
1610	/*
1611	 * If the block_phys is for a previous object, or the last block in the
1612	 * block_phys is strictly before the block in the bookmark, the
1613	 * block_phys is earlier.
1614	 */
1615	if (first->rbp_object < second->zb_object ||
1616	    (first->rbp_object == second->zb_object &&
1617	    first->rbp_blkid + (redact_block_get_count(first) - 1) <
1618	    second->zb_blkid)) {
1619		return (-1);
1620	}
1621
1622	/*
1623	 * If the bookmark is for a previous object, or the block in the
1624	 * bookmark is strictly before the first block in the block_phys, the
1625	 * bookmark is earlier.
1626	 */
1627	if (first->rbp_object > second->zb_object ||
1628	    (first->rbp_object == second->zb_object &&
1629	    first->rbp_blkid > second->zb_blkid)) {
1630		return (1);
1631	}
1632
1633	return (0);
1634}
1635
1636/*
1637 * Traverse the redaction list in the provided object, and call the callback for
1638 * each entry we find. Don't call the callback for any records before resume.
1639 */
1640int
1641dsl_redaction_list_traverse(redaction_list_t *rl, zbookmark_phys_t *resume,
1642    rl_traverse_callback_t cb, void *arg)
1643{
1644	objset_t *mos = rl->rl_mos;
1645	int err = 0;
1646
1647	if (rl->rl_phys->rlp_last_object != UINT64_MAX ||
1648	    rl->rl_phys->rlp_last_blkid != UINT64_MAX) {
1649		/*
1650		 * When we finish a send, we update the last object and offset
1651		 * to UINT64_MAX.  If a send fails partway through, the last
1652		 * object and offset will have some other value, indicating how
1653		 * far the send got. The redaction list must be complete before
1654		 * it can be traversed, so return EINVAL if the last object and
1655		 * blkid are not set to UINT64_MAX.
1656		 */
1657		return (SET_ERROR(EINVAL));
1658	}
1659
1660	/*
1661	 * This allows us to skip the binary search and resume checking logic
1662	 * below, if we're not resuming a redacted send.
1663	 */
1664	if (ZB_IS_ZERO(resume))
1665		resume = NULL;
1666
1667	/*
1668	 * Binary search for the point to resume from.
1669	 */
1670	uint64_t maxidx = rl->rl_phys->rlp_num_entries - 1;
1671	uint64_t minidx = 0;
1672	while (resume != NULL && maxidx > minidx) {
1673		redact_block_phys_t rbp = { 0 };
1674		ASSERT3U(maxidx, >, minidx);
1675		uint64_t mididx = minidx + ((maxidx - minidx) / 2);
1676		err = dmu_read(mos, rl->rl_object, mididx * sizeof (rbp),
1677		    sizeof (rbp), &rbp, DMU_READ_NO_PREFETCH);
1678		if (err != 0)
1679			break;
1680
1681		int cmp = redact_block_zb_compare(&rbp, resume);
1682
1683		if (cmp == 0) {
1684			minidx = mididx;
1685			break;
1686		} else if (cmp > 0) {
1687			maxidx =
1688			    (mididx == minidx ? minidx : mididx - 1);
1689		} else {
1690			minidx = mididx + 1;
1691		}
1692	}
1693
1694	unsigned int bufsize = SPA_OLD_MAXBLOCKSIZE;
1695	redact_block_phys_t *buf = zio_data_buf_alloc(bufsize);
1696
1697	unsigned int entries_per_buf = bufsize / sizeof (redact_block_phys_t);
1698	uint64_t start_block = minidx / entries_per_buf;
1699	err = dmu_read(mos, rl->rl_object, start_block * bufsize, bufsize, buf,
1700	    DMU_READ_PREFETCH);
1701
1702	for (uint64_t curidx = minidx;
1703	    err == 0 && curidx < rl->rl_phys->rlp_num_entries;
1704	    curidx++) {
1705		/*
1706		 * We read in the redaction list one block at a time.  Once we
1707		 * finish with all the entries in a given block, we read in a
1708		 * new one.  The predictive prefetcher will take care of any
1709		 * prefetching, and this code shouldn't be the bottleneck, so we
1710		 * don't need to do manual prefetching.
1711		 */
1712		if (curidx % entries_per_buf == 0) {
1713			err = dmu_read(mos, rl->rl_object, curidx *
1714			    sizeof (*buf), bufsize, buf,
1715			    DMU_READ_PREFETCH);
1716			if (err != 0)
1717				break;
1718		}
1719		redact_block_phys_t *rb = &buf[curidx % entries_per_buf];
		/*
		 * If resume is non-NULL, we either skip this record (it lies
		 * entirely before the resume point) or null out resume so we
		 * don't have to keep doing these comparisons.
		 */
1725		if (resume != NULL) {
			/*
			 * It is possible that after the binary search we got
			 * a record before the resume point.  There are two
			 * cases where this can occur.  If the record is the
			 * last redaction record, and the resume point is
			 * after the end of the redacted data, curidx will be
			 * the last redaction record.  In that case, the loop
			 * will end after this iteration.  The second case is
			 * when the resume point lies between two redaction
			 * records: the binary search can return either the
			 * record before or after the resume point, and in
			 * that case the next iteration will be greater than
			 * the resume point.
			 */
1739			if (redact_block_zb_compare(rb, resume) < 0) {
1740				ASSERT3U(curidx, ==, minidx);
1741				continue;
1742			} else {
1743				/*
1744				 * If the place to resume is in the middle of
1745				 * the range described by this
1746				 * redact_block_phys, then modify the
1747				 * redact_block_phys in memory so we generate
1748				 * the right records.
1749				 */
1750				if (resume->zb_object == rb->rbp_object &&
1751				    resume->zb_blkid > rb->rbp_blkid) {
1752					uint64_t diff = resume->zb_blkid -
1753					    rb->rbp_blkid;
1754					rb->rbp_blkid = resume->zb_blkid;
1755					redact_block_set_count(rb,
1756					    redact_block_get_count(rb) - diff);
1757				}
1758				resume = NULL;
1759			}
1760		}
1761
1762		if (cb(rb, arg) != 0) {
1763			err = EINTR;
1764			break;
1765		}
1766	}
1767
1768	zio_data_buf_free(buf, bufsize);
1769	return (err);
1770}
1771