/* bpobj.c — revision 324010 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 * Copyright (c) 2017 Datto Inc.
 */
27
#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>

35/*
36 * Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
37 */
38uint64_t
39bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
40{
41	spa_t *spa = dmu_objset_spa(os);
42	dsl_pool_t *dp = dmu_objset_pool(os);
43
44	if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
45		if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
46			ASSERT0(dp->dp_empty_bpobj);
47			dp->dp_empty_bpobj =
48			    bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx);
49			VERIFY(zap_add(os,
50			    DMU_POOL_DIRECTORY_OBJECT,
51			    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
52			    &dp->dp_empty_bpobj, tx) == 0);
53		}
54		spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
55		ASSERT(dp->dp_empty_bpobj != 0);
56		return (dp->dp_empty_bpobj);
57	} else {
58		return (bpobj_alloc(os, blocksize, tx));
59	}
60}
61
62void
63bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
64{
65	dsl_pool_t *dp = dmu_objset_pool(os);
66
67	spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
68	if (!spa_feature_is_active(dmu_objset_spa(os),
69	    SPA_FEATURE_EMPTY_BPOBJ)) {
70		VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
71		    DMU_POOL_DIRECTORY_OBJECT,
72		    DMU_POOL_EMPTY_BPOBJ, tx));
73		VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
74		dp->dp_empty_bpobj = 0;
75	}
76}
77
78uint64_t
79bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
80{
81	int size;
82
83	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
84		size = BPOBJ_SIZE_V0;
85	else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
86		size = BPOBJ_SIZE_V1;
87	else
88		size = sizeof (bpobj_phys_t);
89
90	return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
91	    DMU_OT_BPOBJ_HDR, size, tx));
92}
93
/*
 * Free bpobj "obj" and everything it references: recursively free the
 * bpobjs listed in its subobj array (if any), then the subobj array
 * object itself, and finally the bpobj object.
 */
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	int64_t i;
	bpobj_t bpo;
	dmu_object_info_t doi;
	int epb;
	dmu_buf_t *dbuf = NULL;

	/* The shared empty bpobj is refcounted; it must never be freed here. */
	ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
	VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));

	mutex_enter(&bpo.bpo_lock);

	/* V0/V1 bpobjs have no subobj array; nothing to recurse into. */
	if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
		goto out;

	VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
	/* Number of uint64_t object numbers per data block. */
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	/* Walk the subobj array backwards, freeing each listed bpobj. */
	for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		/*
		 * Since we walk backwards, the cached dbuf remains valid
		 * until the offset drops below its start.
		 */
		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			VERIFY3U(0, ==, dmu_buf_hold(os,
			    bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		bpobj_free(os, objarray[blkoff], tx);
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	/* All subobjs are gone; free the subobj array object itself. */
	VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));

out:
	mutex_exit(&bpo.bpo_lock);
	bpobj_close(&bpo);

	VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}
146
147int
148bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
149{
150	dmu_object_info_t doi;
151	int err;
152
153	err = dmu_object_info(os, object, &doi);
154	if (err)
155		return (err);
156
157	bzero(bpo, sizeof (*bpo));
158	mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
159
160	ASSERT(bpo->bpo_dbuf == NULL);
161	ASSERT(bpo->bpo_phys == NULL);
162	ASSERT(object != 0);
163	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
164	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
165
166	err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
167	if (err)
168		return (err);
169
170	bpo->bpo_os = os;
171	bpo->bpo_object = object;
172	bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
173	bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
174	bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
175	bpo->bpo_phys = bpo->bpo_dbuf->db_data;
176	return (0);
177}
178
179void
180bpobj_close(bpobj_t *bpo)
181{
182	/* Lame workaround for closing a bpobj that was never opened. */
183	if (bpo->bpo_object == 0)
184		return;
185
186	dmu_buf_rele(bpo->bpo_dbuf, bpo);
187	if (bpo->bpo_cached_dbuf != NULL)
188		dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
189	bpo->bpo_dbuf = NULL;
190	bpo->bpo_phys = NULL;
191	bpo->bpo_cached_dbuf = NULL;
192	bpo->bpo_object = 0;
193
194	mutex_destroy(&bpo->bpo_lock);
195}
196
197static boolean_t
198bpobj_hasentries(bpobj_t *bpo)
199{
200	return (bpo->bpo_phys->bpo_num_blkptrs != 0 ||
201	    (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs != 0));
202}
203
/*
 * Common guts of bpobj_iterate() and bpobj_iterate_nofree(): invoke
 * "func" on every block pointer in the bpobj — first the bps stored
 * directly, then those in subordinate bpobjs, walking newest-first.
 * When "free" is set, each visited entry is also removed and the
 * header's space accounting is adjusted; "tx" must then be an open
 * transaction.  Stops early and returns nonzero if func (or the DMU)
 * fails; already-visited entries stay removed in that case.
 */
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
    boolean_t free)
{
	dmu_object_info_t doi;
	int epb;
	int64_t i;
	int err = 0;
	dmu_buf_t *dbuf = NULL;

	mutex_enter(&bpo->bpo_lock);

	if (!bpobj_hasentries(bpo))
		goto out;

	if (free)
		dmu_buf_will_dirty(bpo->bpo_dbuf, tx);

	/* Pass 1: the bps stored directly in this object's data blocks. */
	for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
		blkptr_t *bparray;
		blkptr_t *bp;
		uint64_t offset, blkoff;

		offset = i * sizeof (blkptr_t);
		blkoff = P2PHASE(i, bpo->bpo_epb);

		/*
		 * Since we walk backwards, the cached dbuf remains valid
		 * until the offset drops below its start.
		 */
		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
			    FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		bparray = dbuf->db_data;
		bp = &bparray[blkoff];
		err = func(arg, bp, tx);
		if (err)
			break;
		if (free) {
			/* Deduct this bp from the header's accounting. */
			bpo->bpo_phys->bpo_bytes -=
			    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			if (bpo->bpo_havecomp) {
				bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
				bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
			}
			bpo->bpo_phys->bpo_num_blkptrs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		/* Punch out storage past the last surviving entry. */
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
		    (i + 1) * sizeof (blkptr_t), -1ULL, tx));
	}
	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
		goto out;

	/* Pass 2: recurse into each subordinate bpobj, newest first. */
	ASSERT(bpo->bpo_havecomp);
	err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
	if (err) {
		mutex_exit(&bpo->bpo_lock);
		return (err);
	}
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
	/* Number of uint64_t object numbers per data block. */
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;
		bpobj_t sublist;
		uint64_t used_before, comp_before, uncomp_before;
		uint64_t used_after, comp_after, uncomp_after;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
		if (err)
			break;
		if (free) {
			/*
			 * Snapshot the sublist's space so we can deduct
			 * exactly what the recursion freed from our own
			 * accounting afterwards.
			 */
			err = bpobj_space(&sublist,
			    &used_before, &comp_before, &uncomp_before);
			if (err != 0) {
				bpobj_close(&sublist);
				break;
			}
		}
		err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
		if (free) {
			VERIFY3U(0, ==, bpobj_space(&sublist,
			    &used_after, &comp_after, &uncomp_after));
			bpo->bpo_phys->bpo_bytes -= used_before - used_after;
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
			bpo->bpo_phys->bpo_uncomp -=
			    uncomp_before - uncomp_after;
		}

		bpobj_close(&sublist);
		if (err)
			break;
		if (free) {
			/* The sublist is now empty; discard the object. */
			err = dmu_object_free(bpo->bpo_os,
			    objarray[blkoff], tx);
			if (err)
				break;
			bpo->bpo_phys->bpo_num_subobjs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		/* Trim the subobj array past the last surviving entry. */
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
		    bpo->bpo_phys->bpo_subobjs,
		    (i + 1) * sizeof (uint64_t), -1ULL, tx));
	}

out:
	/* If there are no entries, there should be no bytes. */
	if (!bpobj_hasentries(bpo)) {
		ASSERT0(bpo->bpo_phys->bpo_bytes);
		ASSERT0(bpo->bpo_phys->bpo_comp);
		ASSERT0(bpo->bpo_phys->bpo_uncomp);
	}

	mutex_exit(&bpo->bpo_lock);
	return (err);
}
357
/*
 * Iterate and remove the entries.  If func returns nonzero, iteration
 * will stop and that entry will not be removed.  Entries already
 * visited before the failure remain removed.
 */
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
}
367
/*
 * Iterate the entries without removing them.  If func returns nonzero,
 * iteration will stop.  "tx" may be NULL since nothing is modified.
 */
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
}
376
/*
 * Absorb bpobj "subobj" into "bpo" by reference: subobj's object
 * number is appended to bpo's subobj array rather than copying the
 * individual bps.  The caller relinquishes subobj — an empty subobj
 * is freed immediately, otherwise its space is folded into bpo's
 * accounting.
 */
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
	bpobj_t subbpo;
	uint64_t used, comp, uncomp, subsubobjs;

	ASSERT(bpo->bpo_havesubobj);
	ASSERT(bpo->bpo_havecomp);
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	/* The shared empty bpobj is refcounted, not enqueued. */
	if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
		bpobj_decr_empty(bpo->bpo_os, tx);
		return;
	}

	VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
	VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));

	if (!bpobj_hasentries(&subbpo)) {
		/* No point in having an empty subobj. */
		bpobj_close(&subbpo);
		bpobj_free(bpo->bpo_os, subobj, tx);
		return;
	}

	mutex_enter(&bpo->bpo_lock);
	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	/* Lazily create our subobj array on the first enqueue. */
	if (bpo->bpo_phys->bpo_subobjs == 0) {
		bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
		    DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
		    DMU_OT_NONE, 0, tx);
	}

	dmu_object_info_t doi;
	ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);

	/* Append subobj's object number to the end of our subobj array. */
	dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
	    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
	    sizeof (subobj), &subobj, tx);
	bpo->bpo_phys->bpo_num_subobjs++;

	/*
	 * If subobj has only one block of subobjs, then move subobj's
	 * subobjs to bpo's subobj list directly.  This reduces
	 * recursion in bpobj_iterate due to nested subobjs.
	 */
	subsubobjs = subbpo.bpo_phys->bpo_subobjs;
	if (subsubobjs != 0) {
		dmu_object_info_t doi;

		VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
		/* A single-block array fits entirely in one dbuf. */
		if (doi.doi_max_offset == doi.doi_data_block_size) {
			dmu_buf_t *subdb;
			uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;

			VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
			    0, FTAG, &subdb, 0));
			/*
			 * Make sure that we are not asking dmu_write()
			 * to write more data than we have in our buffer.
			 */
			VERIFY3U(subdb->db_size, >=,
			    numsubsub * sizeof (subobj));
			dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
			    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
			    numsubsub * sizeof (subobj), subdb->db_data, tx);
			dmu_buf_rele(subdb, FTAG);
			bpo->bpo_phys->bpo_num_subobjs += numsubsub;

			/*
			 * Detach the array from subbpo so it isn't
			 * referenced after being freed below.
			 */
			dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
			subbpo.bpo_phys->bpo_subobjs = 0;
			VERIFY3U(0, ==, dmu_object_free(bpo->bpo_os,
			    subsubobjs, tx));
		}
	}
	/* Fold subobj's space accounting into ours. */
	bpo->bpo_phys->bpo_bytes += used;
	bpo->bpo_phys->bpo_comp += comp;
	bpo->bpo_phys->bpo_uncomp += uncomp;
	mutex_exit(&bpo->bpo_lock);

	bpobj_close(&subbpo);
}
460
/*
 * Append block pointer "bp" to bpobj "bpo".  The bp is stored in a
 * stripped-down form (fill count cleared, and either the checksum or
 * the embedded payload zeroed) so the object compresses better, and
 * the header's space accounting is updated.
 */
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t stored_bp = *bp;
	uint64_t offset;
	int blkoff;
	blkptr_t *bparray;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (BP_IS_EMBEDDED(bp)) {
		/*
		 * The bpobj will compress better without the payload.
		 *
		 * Note that we store EMBEDDED bp's because they have an
		 * uncompressed size, which must be accounted for.  An
		 * alternative would be to add their size to bpo_uncomp
		 * without storing the bp, but that would create additional
		 * complications: bpo_uncomp would be inconsistent with the
		 * set of BP's stored, and bpobj_iterate() wouldn't visit
		 * all the space accounted for in the bpobj.
		 */
		bzero(&stored_bp, sizeof (stored_bp));
		stored_bp.blk_prop = bp->blk_prop;
		stored_bp.blk_birth = bp->blk_birth;
	} else if (!BP_GET_DEDUP(bp)) {
		/* The bpobj will compress better without the checksum */
		bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
	}

	/* We never need the fill count. */
	stored_bp.blk_fill = 0;

	mutex_enter(&bpo->bpo_lock);

	/* Append position: one past the last stored bp. */
	offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
	blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);

	/*
	 * Keep a hold on the data block being appended into, so that
	 * consecutive enqueues into the same block avoid re-holding it.
	 */
	if (bpo->bpo_cached_dbuf == NULL ||
	    offset < bpo->bpo_cached_dbuf->db_offset ||
	    offset >= bpo->bpo_cached_dbuf->db_offset +
	    bpo->bpo_cached_dbuf->db_size) {
		if (bpo->bpo_cached_dbuf)
			dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
		VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
		    offset, bpo, &bpo->bpo_cached_dbuf, 0));
	}

	dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
	bparray = bpo->bpo_cached_dbuf->db_data;
	bparray[blkoff] = stored_bp;

	/* Update the counters in the header. */
	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	bpo->bpo_phys->bpo_num_blkptrs++;
	bpo->bpo_phys->bpo_bytes +=
	    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
	if (bpo->bpo_havecomp) {
		bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
		bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpo->bpo_lock);
}
524
/*
 * Callback state for space_range_cb(): accumulates the space used by
 * block pointers born in the interval (mintxg, maxtxg].
 */
struct space_range_arg {
	spa_t *spa;
	uint64_t mintxg;	/* exclusive lower bound on blk_birth */
	uint64_t maxtxg;	/* inclusive upper bound on blk_birth */
	uint64_t used;		/* accumulated dsize */
	uint64_t comp;		/* accumulated physical (compressed) size */
	uint64_t uncomp;	/* accumulated uncompressed size */
};
533
534/* ARGSUSED */
535static int
536space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
537{
538	struct space_range_arg *sra = arg;
539
540	if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
541		if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
542			sra->used += bp_get_dsize_sync(sra->spa, bp);
543		else
544			sra->used += bp_get_dsize(sra->spa, bp);
545		sra->comp += BP_GET_PSIZE(bp);
546		sra->uncomp += BP_GET_UCSIZE(bp);
547	}
548	return (0);
549}
550
551int
552bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
553{
554	mutex_enter(&bpo->bpo_lock);
555
556	*usedp = bpo->bpo_phys->bpo_bytes;
557	if (bpo->bpo_havecomp) {
558		*compp = bpo->bpo_phys->bpo_comp;
559		*uncompp = bpo->bpo_phys->bpo_uncomp;
560		mutex_exit(&bpo->bpo_lock);
561		return (0);
562	} else {
563		mutex_exit(&bpo->bpo_lock);
564		return (bpobj_space_range(bpo, 0, UINT64_MAX,
565		    usedp, compp, uncompp));
566	}
567}
568
569/*
570 * Return the amount of space in the bpobj which is:
571 * mintxg < blk_birth <= maxtxg
572 */
573int
574bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
575    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
576{
577	struct space_range_arg sra = { 0 };
578	int err;
579
580	/*
581	 * As an optimization, if they want the whole txg range, just
582	 * get bpo_bytes rather than iterating over the bps.
583	 */
584	if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
585		return (bpobj_space(bpo, usedp, compp, uncompp));
586
587	sra.spa = dmu_objset_spa(bpo->bpo_os);
588	sra.mintxg = mintxg;
589	sra.maxtxg = maxtxg;
590
591	err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
592	*usedp = sra.used;
593	*compp = sra.comp;
594	*uncompp = sra.uncomp;
595	return (err);
596}
597