dmu_traverse.c revision 282995
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2015 Chunwei Chen. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_impl.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/callb.h>
#include <sys/zfeature.h>

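/*
 * Upper bound on the number of blocks the prefetch thread may have
 * fetched ahead of the main traversal thread (see prefetch_data_t
 * below).
 */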
int zfs_pd_blks_max = 100;

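/*
 * State shared between the main traversal thread and the prefetch
 * thread: pd_blks_fetched counts blocks the prefetcher has issued but
 * the traversal has not yet consumed, pd_mtx/pd_cv coordinate the two
 * threads, and pd_cancel/pd_exited handle shutdown.
 */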
typedef struct prefetch_data {
	kmutex_t pd_mtx;
	kcondvar_t pd_cv;
	int pd_blks_max;
	int pd_blks_fetched;
	int pd_flags;
	boolean_t pd_cancel;
	boolean_t pd_exited;
} prefetch_data_t;

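/*
 * Per-traversal state passed down the recursion; the prefetch thread
 * runs with its own copy (see traverse_prefetch_thread()).
 */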
typedef struct traverse_data {
	spa_t *td_spa;
	uint64_t td_objset;
	blkptr_t *td_rootbp;
	uint64_t td_min_txg;
	zbookmark_phys_t *td_resume;
	int td_flags;
	prefetch_data_t *td_pfd;
	boolean_t td_paused;
	uint64_t td_hole_birth_enabled_txg;
	blkptr_cb_t *td_func;
	void *td_arg;
} traverse_data_t;

static int traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object);
static void prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *,
    uint64_t objset, uint64_t object);

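/*
 * zil_parse() callback for log blocks.  Blocks born at or after the
 * pool's first txg are skipped when nothing was claimed
 * (claim_txg == 0), since they are not yet stable on disk.
 */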
static int
traverse_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;
	zbookmark_phys_t zb;

	if (BP_IS_HOLE(bp))
		return (0);

	if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(td->td_spa))
		return (0);

	SET_BOOKMARK(&zb, td->td_objset, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
	    bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);

	(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL, td->td_arg);

	return (0);
}

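/*
 * zil_parse() callback for log records.  Only TX_WRITE records carry
 * a block pointer; it is visited unless it is a hole or predates the
 * claim txg.
 */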
static int
traverse_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
	traverse_data_t *td = arg;

	if (lrc->lrc_txtype == TX_WRITE) {
		lr_write_t *lr = (lr_write_t *)lrc;
		blkptr_t *bp = &lr->lr_blkptr;
		zbookmark_phys_t zb;

		if (BP_IS_HOLE(bp))
			return (0);

		if (claim_txg == 0 || bp->blk_birth < claim_txg)
			return (0);

		SET_BOOKMARK(&zb, td->td_objset, lr->lr_foid,
		    ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));

		(void) td->td_func(td->td_spa, zilog, bp, &zb, NULL,
		    td->td_arg);
	}
	return (0);
}

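/*
 * Walk the intent log of the objset being traversed, visiting claimed
 * log blocks and the block pointers embedded in TX_WRITE records.
 */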
static void
traverse_zil(traverse_data_t *td, zil_header_t *zh)
{
	uint64_t claim_txg = zh->zh_claim_txg;
	zilog_t *zilog;

	/*
	 * We only want to visit blocks that have been claimed but not yet
	 * replayed; plus, in read-only mode, blocks that are already stable.
	 */
	if (claim_txg == 0 && spa_writeable(td->td_spa))
		return;

	zilog = zil_alloc(spa_get_dsl(td->td_spa)->dp_meta_objset, zh);

	(void) zil_parse(zilog, traverse_zil_block, traverse_zil_record, td,
	    claim_txg);

	zil_free(zilog);
}

typedef enum resume_skip {
	RESUME_SKIP_ALL,
	RESUME_SKIP_NONE,
	RESUME_SKIP_CHILDREN
} resume_skip_t;

/*
 * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and
 * the block indicated by zb does not need to be visited at all. Returns
 * RESUME_SKIP_CHILDREN if we are resuming a post-order traversal and we reach
 * the resume point. This indicates that this block should be visited but not
 * its children (since they must have been visited in a previous traversal).
 * Otherwise returns RESUME_SKIP_NONE.
 */
static resume_skip_t
resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp,
    const zbookmark_phys_t *zb)
{
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume)) {
		/*
		 * If we already visited this bp & everything below,
		 * don't bother doing it again.
		 */
		if (zbookmark_is_before(dnp, zb, td->td_resume))
			return (RESUME_SKIP_ALL);

		/*
		 * If we found the block we're trying to resume from, zero
		 * the bookmark out to indicate that we have resumed.
		 */
		if (bcmp(zb, td->td_resume, sizeof (*zb)) == 0) {
			bzero(td->td_resume, sizeof (*zb));
			if (td->td_flags & TRAVERSE_POST)
				return (RESUME_SKIP_CHILDREN);
		}
	}
	return (RESUME_SKIP_NONE);
}

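/*
 * Issue a non-blocking, speculative arc_read() for a metadata block so
 * that it is likely to be cached by the time the traversal descends to
 * it.  Only indirect blocks and dnode blocks qualify; level-0 data
 * blocks are left to the data prefetch thread.
 */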
static void
traverse_prefetch_metadata(traverse_data_t *td,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_flags_t flags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	if (!(td->td_flags & TRAVERSE_PREFETCH_METADATA))
		return;
	/*
	 * If we are in the process of resuming, don't prefetch, because
	 * some children will not be needed (and in fact may have already
	 * been freed).
	 */
	if (td->td_resume != NULL && !ZB_IS_ZERO(td->td_resume))
		return;
	if (BP_IS_HOLE(bp) || bp->blk_birth <= td->td_min_txg)
		return;
	if (BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE)
		return;

	(void) arc_read(NULL, td->td_spa, bp, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
}

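/*
 * Decide whether prefetching a block would help: holes and embedded
 * block pointers have no separate on-disk payload, and intent-log
 * blocks are visited through the ZIL machinery instead.
 */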
static boolean_t
prefetch_needed(prefetch_data_t *pfd, const blkptr_t *bp)
{
	ASSERT(pfd->pd_flags & TRAVERSE_PREFETCH_DATA);
	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) ||
	    BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG)
		return (B_FALSE);
	return (B_TRUE);
}

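/*
 * Recursively visit the tree of blocks rooted at bp: invoke the
 * callback pre- and/or post-order according to td_flags, and descend
 * into indirect blocks, dnode arrays, and objset blocks.  If the
 * traversal stops with an error, the bookmark of the first unvisited
 * level-0 block is recorded in td_resume so it can be restarted later.
 */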
static int
traverse_visitbp(traverse_data_t *td, const dnode_phys_t *dnp,
    const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	zbookmark_phys_t czb;
	int err = 0;
	arc_buf_t *buf = NULL;
	prefetch_data_t *pd = td->td_pfd;
	boolean_t hard = td->td_flags & TRAVERSE_HARD;

	switch (resume_skip_check(td, dnp, zb)) {
	case RESUME_SKIP_ALL:
		return (0);
	case RESUME_SKIP_CHILDREN:
		goto post;
	case RESUME_SKIP_NONE:
		break;
	default:
		ASSERT(0);
	}

	if (bp->blk_birth == 0) {
		/*
		 * Since this block has a birth time of 0 it must be a
		 * hole created before the SPA_FEATURE_HOLE_BIRTH
		 * feature was enabled.  If SPA_FEATURE_HOLE_BIRTH
		 * was enabled before the min_txg for this traversal we
		 * know the hole must have been created before the
		 * min_txg for this traversal, so we can skip it. If
		 * SPA_FEATURE_HOLE_BIRTH was enabled after the min_txg
		 * for this traversal we cannot tell if the hole was
		 * created before or after the min_txg for this
		 * traversal, so we cannot skip it.
		 */
		if (td->td_hole_birth_enabled_txg < td->td_min_txg)
			return (0);
	} else if (bp->blk_birth <= td->td_min_txg) {
		return (0);
	}

	if (pd != NULL && !pd->pd_exited && prefetch_needed(pd, bp)) {
		mutex_enter(&pd->pd_mtx);
		ASSERT(pd->pd_blks_fetched >= 0);
		while (pd->pd_blks_fetched == 0 && !pd->pd_exited)
			cv_wait(&pd->pd_cv, &pd->pd_mtx);
		pd->pd_blks_fetched--;
		cv_broadcast(&pd->pd_cv);
		mutex_exit(&pd->pd_mtx);
	}

	if (BP_IS_HOLE(bp)) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);
		if (err != 0)
			goto post;
		return (0);
	}

	if (td->td_flags & TRAVERSE_PRE) {
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp,
		    td->td_arg);
		if (err == TRAVERSE_VISIT_NO_CHILDREN)
			return (0);
		if (err != 0)
			goto post;
	}

	if (BP_GET_LEVEL(bp) > 0) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		blkptr_t *cbp;
		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		cbp = buf->b_data;

		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			traverse_prefetch_metadata(td, &cbp[i], &czb);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
			    zb->zb_level - 1,
			    zb->zb_blkid * epb + i);
			err = traverse_visitbp(td, dnp, &cbp[i], &czb);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		int i;
		int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
		dnode_phys_t *cdnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;
		cdnp = buf->b_data;

		for (i = 0; i < epb; i++) {
			prefetch_dnode_metadata(td, &cdnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
		}

		/* recursively visitbp() blocks below this */
		for (i = 0; i < epb; i++) {
			err = traverse_dnode(td, &cdnp[i], zb->zb_objset,
			    zb->zb_blkid * epb + i);
			if (err != 0)
				break;
		}
	} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		dnode_phys_t *mdnp, *gdnp, *udnp;

		err = arc_read(NULL, td->td_spa, bp, arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
		if (err != 0)
			goto post;

		osp = buf->b_data;
		mdnp = &osp->os_meta_dnode;
		gdnp = &osp->os_groupused_dnode;
		udnp = &osp->os_userused_dnode;

		prefetch_dnode_metadata(td, mdnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			prefetch_dnode_metadata(td, gdnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
			prefetch_dnode_metadata(td, udnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}

		err = traverse_dnode(td, mdnp, zb->zb_objset,
		    DMU_META_DNODE_OBJECT);
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, gdnp, zb->zb_objset,
			    DMU_GROUPUSED_OBJECT);
		}
		if (err == 0 && arc_buf_size(buf) >= sizeof (objset_phys_t)) {
			err = traverse_dnode(td, udnp, zb->zb_objset,
			    DMU_USERUSED_OBJECT);
		}
	}

	if (buf)
		(void) arc_buf_remove_ref(buf, &buf);

post:
	if (err == 0 && (td->td_flags & TRAVERSE_POST))
		err = td->td_func(td->td_spa, NULL, bp, zb, dnp, td->td_arg);

	if (hard && (err == EIO || err == ECKSUM)) {
		/*
		 * Ignore this disk error as requested by the HARD flag,
		 * and continue traversal.
		 */
		err = 0;
	}

	/*
	 * If we are stopping here, set td_resume.
	 */
	if (td->td_resume != NULL && err != 0 && !td->td_paused) {
		td->td_resume->zb_objset = zb->zb_objset;
		td->td_resume->zb_object = zb->zb_object;
		td->td_resume->zb_level = 0;
		/*
		 * If we have stopped on an indirect block (e.g. due to
		 * i/o error), we have not visited anything below it.
		 * Set the bookmark to the first level-0 block that we need
		 * to visit.  This way, the resuming code does not need to
		 * deal with resuming from indirect blocks.
		 */
		td->td_resume->zb_blkid = zb->zb_blkid <<
		    (zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT));
		td->td_paused = B_TRUE;
	}

	return (err);
}

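/*
 * Prefetch a dnode's top-level block pointers, and its spill block if
 * it has one, without visiting them.
 */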
static void
prefetch_dnode_metadata(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		traverse_prefetch_metadata(td, &dnp->dn_blkptr[j], &czb);
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		traverse_prefetch_metadata(td, &dnp->dn_spill, &czb);
	}
}

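/*
 * Visit each of a dnode's top-level block pointers, then its spill
 * block if DNODE_FLAG_SPILL_BLKPTR is set.
 */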
static int
traverse_dnode(traverse_data_t *td, const dnode_phys_t *dnp,
    uint64_t objset, uint64_t object)
{
	int j, err = 0;
	zbookmark_phys_t czb;

	for (j = 0; j < dnp->dn_nblkptr; j++) {
		SET_BOOKMARK(&czb, objset, object, dnp->dn_nlevels - 1, j);
		err = traverse_visitbp(td, dnp, &dnp->dn_blkptr[j], &czb);
		if (err != 0)
			break;
	}

	if (err == 0 && dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		SET_BOOKMARK(&czb, objset, object, 0, DMU_SPILL_BLKID);
		err = traverse_visitbp(td, dnp, &dnp->dn_spill, &czb);
	}
	return (err);
}

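/*
 * Visit callback used by the prefetch thread: throttle against
 * pd_blks_max, then issue a speculative, non-blocking arc_read() for
 * the block and move on without waiting for it.
 */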
/* ARGSUSED */
static int
traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
	prefetch_data_t *pfd = arg;
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;

	ASSERT(pfd->pd_blks_fetched >= 0);
	if (pfd->pd_cancel)
		return (SET_ERROR(EINTR));

	if (!prefetch_needed(pfd, bp))
		return (0);

	mutex_enter(&pfd->pd_mtx);
	while (!pfd->pd_cancel && pfd->pd_blks_fetched >= pfd->pd_blks_max)
		cv_wait(&pfd->pd_cv, &pfd->pd_mtx);
	pfd->pd_blks_fetched++;
	cv_broadcast(&pfd->pd_cv);
	mutex_exit(&pfd->pd_mtx);

	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);

	return (0);
}

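/*
 * Body of the prefetch thread: rerun the traversal over a private copy
 * of the traverse_data with traverse_prefetcher() as the callback, then
 * signal pd_exited so the main thread can stop waiting on us.
 */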
static void
traverse_prefetch_thread(void *arg)
{
	traverse_data_t *td_main = arg;
	traverse_data_t td = *td_main;
	zbookmark_phys_t czb;

	td.td_func = traverse_prefetcher;
	td.td_arg = td_main->td_pfd;
	td.td_pfd = NULL;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	(void) traverse_visitbp(&td, NULL, td.td_rootbp, &czb);

	mutex_enter(&td_main->td_pfd->pd_mtx);
	td_main->td_pfd->pd_exited = B_TRUE;
	cv_broadcast(&td_main->td_pfd->pd_cv);
	mutex_exit(&td_main->td_pfd->pd_mtx);
}

/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
static int
traverse_impl(spa_t *spa, dsl_dataset_t *ds, uint64_t objset, blkptr_t *rootbp,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	traverse_data_t td;
	prefetch_data_t pd = { 0 };
	zbookmark_phys_t czb;
	int err;

	ASSERT(ds == NULL || objset == ds->ds_object);
	ASSERT(!(flags & TRAVERSE_PRE) || !(flags & TRAVERSE_POST));

	/*
	 * The data prefetching mechanism (the prefetch thread) is incompatible
	 * with resuming from a bookmark.
	 */
	ASSERT(resume == NULL || !(flags & TRAVERSE_PREFETCH_DATA));

	td.td_spa = spa;
	td.td_objset = objset;
	td.td_rootbp = rootbp;
	td.td_min_txg = txg_start;
	td.td_resume = resume;
	td.td_func = func;
	td.td_arg = arg;
	td.td_pfd = &pd;
	td.td_flags = flags;
	td.td_paused = B_FALSE;

	if (spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
		VERIFY(spa_feature_enabled_txg(spa,
		    SPA_FEATURE_HOLE_BIRTH, &td.td_hole_birth_enabled_txg));
	} else {
		td.td_hole_birth_enabled_txg = 0;
	}

	pd.pd_blks_max = zfs_pd_blks_max;
	pd.pd_flags = flags;
	mutex_init(&pd.pd_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pd.pd_cv, NULL, CV_DEFAULT, NULL);

	/* See comment on ZIL traversal in dsl_scan_visitds. */
	if (ds != NULL && !dsl_dataset_is_snapshot(ds) && !BP_IS_HOLE(rootbp)) {
		arc_flags_t flags = ARC_FLAG_WAIT;
		objset_phys_t *osp;
		arc_buf_t *buf;

		err = arc_read(NULL, td.td_spa, rootbp,
		    arc_getbuf_func, &buf,
		    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, NULL);
		if (err != 0)
			return (err);

		osp = buf->b_data;
		traverse_zil(&td, &osp->os_zil_header);
		(void) arc_buf_remove_ref(buf, &buf);
	}

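	/*
	 * Kick off the data prefetch thread, unless data prefetching was
	 * not requested or the dispatch fails (taskq_dispatch() returns 0,
	 * which TQ_NOQUEUE permits); in either case mark the prefetcher
	 * exited so we don't wait for it below.
	 */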
	if (!(flags & TRAVERSE_PREFETCH_DATA) ||
	    0 == taskq_dispatch(system_taskq, traverse_prefetch_thread,
	    &td, TQ_NOQUEUE))
		pd.pd_exited = B_TRUE;

	SET_BOOKMARK(&czb, td.td_objset,
	    ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
	err = traverse_visitbp(&td, NULL, rootbp, &czb);

	mutex_enter(&pd.pd_mtx);
	pd.pd_cancel = B_TRUE;
	cv_broadcast(&pd.pd_cv);
	while (!pd.pd_exited)
		cv_wait(&pd.pd_cv, &pd.pd_mtx);
	mutex_exit(&pd.pd_mtx);

	mutex_destroy(&pd.pd_mtx);
	cv_destroy(&pd.pd_cv);

	return (err);
}

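/*
 * Example (illustrative sketch, not from the original source): a
 * minimal blkptr_cb_t that counts the non-hole blocks of a dataset,
 * driven by traverse_dataset() in pre-order ("ds" is assumed to be a
 * held dataset):
 *
 *	static int
 *	count_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
 *	    const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
 *	{
 *		uint64_t *countp = arg;
 *
 *		if (!BP_IS_HOLE(bp))
 *			(*countp)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	(void) traverse_dataset(ds, 0, TRAVERSE_PRE, count_cb, &count);
 */
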
/*
 * NB: dataset must not be changing on-disk (e.g., is a snapshot or we are
 * in syncing context).
 */
int
traverse_dataset(dsl_dataset_t *ds, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(ds->ds_dir->dd_pool->dp_spa, ds, ds->ds_object,
	    &dsl_dataset_phys(ds)->ds_bp, txg_start, NULL, flags, func, arg));
}

int
traverse_dataset_destroyed(spa_t *spa, blkptr_t *blkptr,
    uint64_t txg_start, zbookmark_phys_t *resume, int flags,
    blkptr_cb_t func, void *arg)
{
	return (traverse_impl(spa, NULL, ZB_DESTROYED_OBJSET,
	    blkptr, txg_start, resume, flags, func, arg));
}

/*
 * NB: pool must not be changing on-disk (e.g., from zdb or sync context).
 */
int
traverse_pool(spa_t *spa, uint64_t txg_start, int flags,
    blkptr_cb_t func, void *arg)
{
	int err;
	uint64_t obj;
	dsl_pool_t *dp = spa_get_dsl(spa);
	objset_t *mos = dp->dp_meta_objset;
	boolean_t hard = (flags & TRAVERSE_HARD);

	/* visit the MOS */
	err = traverse_impl(spa, NULL, 0, spa_get_rootblkptr(spa),
	    txg_start, NULL, flags, func, arg);
	if (err != 0)
		return (err);

	/* visit each dataset */
	for (obj = 1; err == 0;
	    err = dmu_object_next(mos, &obj, FALSE, txg_start)) {
		dmu_object_info_t doi;

		err = dmu_object_info(mos, obj, &doi);
		if (err != 0) {
			if (hard)
				continue;
			break;
		}

		if (doi.doi_bonus_type == DMU_OT_DSL_DATASET) {
			dsl_dataset_t *ds;
			uint64_t txg = txg_start;

			dsl_pool_config_enter(dp, FTAG);
			err = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
			dsl_pool_config_exit(dp, FTAG);
			if (err != 0) {
				if (hard)
					continue;
				break;
			}
			if (dsl_dataset_phys(ds)->ds_prev_snap_txg > txg)
				txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			err = traverse_dataset(ds, txg, flags, func, arg);
			dsl_dataset_rele(ds, FTAG);
			if (err != 0)
				break;
		}
	}
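	/*
	 * dmu_object_next() returns ESRCH when it runs out of objects,
	 * which is how the loop normally terminates.
	 */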
	if (err == ESRCH)
		err = 0;
	return (err);
}