bpobj.c revision 297112
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>

/*
 * Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
 */
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
		if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
			ASSERT0(dp->dp_empty_bpobj);
			dp->dp_empty_bpobj =
			    bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx);
			VERIFY(zap_add(os,
			    DMU_POOL_DIRECTORY_OBJECT,
			    DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
			    &dp->dp_empty_bpobj, tx) == 0);
		}
		spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
		ASSERT(dp->dp_empty_bpobj != 0);
		return (dp->dp_empty_bpobj);
	} else {
		return (bpobj_alloc(os, blocksize, tx));
	}
}

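/*
 * Illustrative sketch, not part of the original file: a typical caller
 * runs in syncing context and records the returned object number on
 * disk so the (possibly shared) empty bpobj stays referenced.
 * "my_zap_obj" and MY_PROP_NAME are hypothetical names.
 */
#if 0
	uint64_t obj = bpobj_alloc_empty(os, SPA_OLD_MAXBLOCKSIZE, tx);

	VERIFY3U(0, ==, zap_add(os, my_zap_obj, MY_PROP_NAME,
	    sizeof (uint64_t), 1, &obj, tx));
#endif
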
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
	dsl_pool_t *dp = dmu_objset_pool(os);

	spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
	if (!spa_feature_is_active(dmu_objset_spa(os),
	    SPA_FEATURE_EMPTY_BPOBJ)) {
		VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT,
		    DMU_POOL_EMPTY_BPOBJ, tx));
		VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
		dp->dp_empty_bpobj = 0;
	}
}

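/*
 * Allocate a new bpobj.  The bonus buffer size, and hence which
 * accounting fields exist on disk, depends on the pool's version.
 */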
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	int size;

	if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
		size = BPOBJ_SIZE_V0;
	else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
		size = BPOBJ_SIZE_V1;
	else
		size = sizeof (bpobj_phys_t);

	return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
	    DMU_OT_BPOBJ_HDR, size, tx));
}

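/*
 * Free the bpobj, recursively freeing any subobjs it references first.
 */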
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
	int64_t i;
	bpobj_t bpo;
	dmu_object_info_t doi;
	int epb;
	dmu_buf_t *dbuf = NULL;

	ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
	VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));

	mutex_enter(&bpo.bpo_lock);

	if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
		goto out;

	VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			VERIFY3U(0, ==, dmu_buf_hold(os,
			    bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		bpobj_free(os, objarray[blkoff], tx);
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));

out:
	mutex_exit(&bpo.bpo_lock);
	bpobj_close(&bpo);

	VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}

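/*
 * Open the bpobj: hold its bonus buffer and cache the header as bpo_phys.
 */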
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
	dmu_object_info_t doi;
	int err;

	err = dmu_object_info(os, object, &doi);
	if (err)
		return (err);

	bzero(bpo, sizeof (*bpo));
	mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);

	ASSERT(bpo->bpo_dbuf == NULL);
	ASSERT(bpo->bpo_phys == NULL);
	ASSERT(object != 0);
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
	ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);

	err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
	if (err)
		return (err);

	bpo->bpo_os = os;
	bpo->bpo_object = object;
	bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
	bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
	bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
	bpo->bpo_phys = bpo->bpo_dbuf->db_data;
	return (0);
}

void
bpobj_close(bpobj_t *bpo)
{
	/* Lame workaround for closing a bpobj that was never opened. */
	if (bpo->bpo_object == 0)
		return;

	dmu_buf_rele(bpo->bpo_dbuf, bpo);
	if (bpo->bpo_cached_dbuf != NULL)
		dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
	bpo->bpo_dbuf = NULL;
	bpo->bpo_phys = NULL;
	bpo->bpo_cached_dbuf = NULL;
	bpo->bpo_object = 0;

	mutex_destroy(&bpo->bpo_lock);
}

static boolean_t
bpobj_hasentries(bpobj_t *bpo)
{
	return (bpo->bpo_phys->bpo_num_blkptrs != 0 ||
	    (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs != 0));
}

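/*
 * Iterate the entries, most recently added first, optionally freeing
 * them as we go.  Subobjs are visited recursively after the plain bps.
 */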
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
    boolean_t free)
{
	dmu_object_info_t doi;
	int epb;
	int64_t i;
	int err = 0;
	dmu_buf_t *dbuf = NULL;

	mutex_enter(&bpo->bpo_lock);

	if (free)
		dmu_buf_will_dirty(bpo->bpo_dbuf, tx);

	for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
		blkptr_t *bparray;
		blkptr_t *bp;
		uint64_t offset, blkoff;

		offset = i * sizeof (blkptr_t);
		blkoff = P2PHASE(i, bpo->bpo_epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
			    FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		bparray = dbuf->db_data;
		bp = &bparray[blkoff];
		err = func(arg, bp, tx);
		if (err)
			break;
		if (free) {
			bpo->bpo_phys->bpo_bytes -=
			    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			if (bpo->bpo_havecomp) {
				bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
				bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
			}
			bpo->bpo_phys->bpo_num_blkptrs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
		    (i + 1) * sizeof (blkptr_t), -1ULL, tx));
	}
	if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
		goto out;

	ASSERT(bpo->bpo_havecomp);
	err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
	if (err) {
		mutex_exit(&bpo->bpo_lock);
		return (err);
	}
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
	epb = doi.doi_data_block_size / sizeof (uint64_t);

	for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
		uint64_t *objarray;
		uint64_t offset, blkoff;
		bpobj_t sublist;
		uint64_t used_before, comp_before, uncomp_before;
		uint64_t used_after, comp_after, uncomp_after;

		offset = i * sizeof (uint64_t);
		blkoff = P2PHASE(i, epb);

		if (dbuf == NULL || dbuf->db_offset > offset) {
			if (dbuf)
				dmu_buf_rele(dbuf, FTAG);
			err = dmu_buf_hold(bpo->bpo_os,
			    bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
			if (err)
				break;
		}

		ASSERT3U(offset, >=, dbuf->db_offset);
		ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);

		objarray = dbuf->db_data;
		err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
		if (err)
			break;
		if (free) {
			err = bpobj_space(&sublist,
			    &used_before, &comp_before, &uncomp_before);
			if (err != 0) {
				bpobj_close(&sublist);
				break;
			}
		}
		err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
		if (free) {
			VERIFY3U(0, ==, bpobj_space(&sublist,
			    &used_after, &comp_after, &uncomp_after));
			bpo->bpo_phys->bpo_bytes -= used_before - used_after;
			ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
			bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
			bpo->bpo_phys->bpo_uncomp -=
			    uncomp_before - uncomp_after;
		}

		bpobj_close(&sublist);
		if (err)
			break;
		if (free) {
			err = dmu_object_free(bpo->bpo_os,
			    objarray[blkoff], tx);
			if (err)
				break;
			bpo->bpo_phys->bpo_num_subobjs--;
			ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
		}
	}
	if (dbuf) {
		dmu_buf_rele(dbuf, FTAG);
		dbuf = NULL;
	}
	if (free) {
		VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
		    bpo->bpo_phys->bpo_subobjs,
		    (i + 1) * sizeof (uint64_t), -1ULL, tx));
	}

out:
	/* If there are no entries, there should be no bytes. */
	if (!bpobj_hasentries(bpo)) {
		ASSERT0(bpo->bpo_phys->bpo_bytes);
		ASSERT0(bpo->bpo_phys->bpo_comp);
		ASSERT0(bpo->bpo_phys->bpo_uncomp);
	}

	mutex_exit(&bpo->bpo_lock);
	return (err);
}

/*
 * Iterate and remove the entries.  If func returns nonzero, iteration
 * will stop and that entry will not be removed.
 */
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
}

/*
 * Iterate the entries.  If func returns nonzero, iteration will stop.
 */
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
	return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
}

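/*
 * Illustrative sketch, not part of the original file: a minimal
 * bpobj_itor_t callback and a call site.  The names "count_entries_cb"
 * and "count" are hypothetical; tx may be NULL when nothing is freed.
 */
#if 0
static int
count_entries_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	uint64_t *countp = arg;

	(*countp)++;
	return (0);	/* nonzero stops the iteration */
}

	/* ... then, from some caller: */
	uint64_t count = 0;
	VERIFY3U(0, ==, bpobj_iterate_nofree(bpo, count_entries_cb,
	    &count, NULL));
#endif

/*
 * Add the entries of the bpobj numbered "subobj" to bpo by reference,
 * keeping subobj nested rather than copying its block pointers.
 */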
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
	bpobj_t subbpo;
	uint64_t used, comp, uncomp, subsubobjs;

	ASSERT(bpo->bpo_havesubobj);
	ASSERT(bpo->bpo_havecomp);
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
		bpobj_decr_empty(bpo->bpo_os, tx);
		return;
	}

	VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
	VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));

	if (!bpobj_hasentries(&subbpo)) {
		/* No point in having an empty subobj. */
		bpobj_close(&subbpo);
		bpobj_free(bpo->bpo_os, subobj, tx);
		return;
	}

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	if (bpo->bpo_phys->bpo_subobjs == 0) {
		bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
		    DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
		    DMU_OT_NONE, 0, tx);
	}

	dmu_object_info_t doi;
	ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
	ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);

	mutex_enter(&bpo->bpo_lock);
	dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
	    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
	    sizeof (subobj), &subobj, tx);
	bpo->bpo_phys->bpo_num_subobjs++;

	/*
	 * If subobj has only one block of subobjs, then move subobj's
	 * subobjs to bpo's subobj list directly.  This reduces
	 * recursion in bpobj_iterate due to nested subobjs.
	 */
	subsubobjs = subbpo.bpo_phys->bpo_subobjs;
	if (subsubobjs != 0) {
		dmu_object_info_t doi;

		VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
		if (doi.doi_max_offset == doi.doi_data_block_size) {
			dmu_buf_t *subdb;
			uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;

			VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
			    0, FTAG, &subdb, 0));
			/*
			 * Make sure that we are not asking dmu_write()
			 * to write more data than we have in our buffer.
			 */
			VERIFY3U(subdb->db_size, >=,
			    numsubsub * sizeof (subobj));
			dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
			    bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
			    numsubsub * sizeof (subobj), subdb->db_data, tx);
			dmu_buf_rele(subdb, FTAG);
			bpo->bpo_phys->bpo_num_subobjs += numsubsub;

			dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
			subbpo.bpo_phys->bpo_subobjs = 0;
			VERIFY3U(0, ==, dmu_object_free(bpo->bpo_os,
			    subsubobjs, tx));
		}
	}
	bpo->bpo_phys->bpo_bytes += used;
	bpo->bpo_phys->bpo_comp += comp;
	bpo->bpo_phys->bpo_uncomp += uncomp;
	mutex_exit(&bpo->bpo_lock);

	bpobj_close(&subbpo);
}

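/*
 * Illustrative sketch, not part of the original file: callers merge one
 * bpobj into another by reference during sync, e.g. folding a snapshot
 * deadlist into the pool-wide free list.  "deadlist_obj" is hypothetical.
 */
#if 0
	bpobj_enqueue_subobj(&dp->dp_free_bpobj, deadlist_obj, tx);
#endif

/*
 * Append bp to the bpobj, zeroing the fields that need not be stored
 * (the fill count, the checksum of non-dedup bps, and the payload of
 * embedded bps).
 */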
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
	blkptr_t stored_bp = *bp;
	uint64_t offset;
	int blkoff;
	blkptr_t *bparray;

	ASSERT(!BP_IS_HOLE(bp));
	ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);

	if (BP_IS_EMBEDDED(bp)) {
		/*
		 * The bpobj will compress better without the payload.
		 *
		 * Note that we store EMBEDDED bp's because they have an
		 * uncompressed size, which must be accounted for.  An
		 * alternative would be to add their size to bpo_uncomp
		 * without storing the bp, but that would create additional
		 * complications: bpo_uncomp would be inconsistent with the
		 * set of BP's stored, and bpobj_iterate() wouldn't visit
		 * all the space accounted for in the bpobj.
		 */
		bzero(&stored_bp, sizeof (stored_bp));
		stored_bp.blk_prop = bp->blk_prop;
		stored_bp.blk_birth = bp->blk_birth;
	} else if (!BP_GET_DEDUP(bp)) {
		/* The bpobj will compress better without the checksum */
		bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
	}

	/* We never need the fill count. */
	stored_bp.blk_fill = 0;

	mutex_enter(&bpo->bpo_lock);

	offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
	blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);

	if (bpo->bpo_cached_dbuf == NULL ||
	    offset < bpo->bpo_cached_dbuf->db_offset ||
	    offset >= bpo->bpo_cached_dbuf->db_offset +
	    bpo->bpo_cached_dbuf->db_size) {
		if (bpo->bpo_cached_dbuf)
			dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
		VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
		    offset, bpo, &bpo->bpo_cached_dbuf, 0));
	}

	dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
	bparray = bpo->bpo_cached_dbuf->db_data;
	bparray[blkoff] = stored_bp;

	dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
	bpo->bpo_phys->bpo_num_blkptrs++;
	bpo->bpo_phys->bpo_bytes +=
	    bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
	if (bpo->bpo_havecomp) {
		bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
		bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
	}
	mutex_exit(&bpo->bpo_lock);
}

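/*
 * Illustrative sketch, not part of the original file: a deferred-free
 * path appends each dead, non-hole bp during sync.
 */
#if 0
	if (!BP_IS_HOLE(bp))
		bpobj_enqueue(&dp->dp_free_bpobj, bp, tx);
#endif
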
struct space_range_arg {
	spa_t *spa;
	uint64_t mintxg;
	uint64_t maxtxg;
	uint64_t used;
	uint64_t comp;
	uint64_t uncomp;
};

/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	struct space_range_arg *sra = arg;

	if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
		if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
			sra->used += bp_get_dsize_sync(sra->spa, bp);
		else
			sra->used += bp_get_dsize(sra->spa, bp);
		sra->comp += BP_GET_PSIZE(bp);
		sra->uncomp += BP_GET_UCSIZE(bp);
	}
	return (0);
}

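/*
 * Return the total space referenced by the bpobj.  If the on-disk
 * format predates compressed-size accounting, fall back to iterating.
 */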
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	mutex_enter(&bpo->bpo_lock);

	*usedp = bpo->bpo_phys->bpo_bytes;
	if (bpo->bpo_havecomp) {
		*compp = bpo->bpo_phys->bpo_comp;
		*uncompp = bpo->bpo_phys->bpo_uncomp;
		mutex_exit(&bpo->bpo_lock);
		return (0);
	} else {
		mutex_exit(&bpo->bpo_lock);
		return (bpobj_space_range(bpo, 0, UINT64_MAX,
		    usedp, compp, uncompp));
	}
}

/*
 * Return the amount of space in the bpobj which is:
 * mintxg < blk_birth <= maxtxg
 */
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
    uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
	struct space_range_arg sra = { 0 };
	int err;

	/*
	 * As an optimization, if they want the whole txg range, just
	 * get bpo_bytes rather than iterating over the bps.
	 */
	if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
		return (bpobj_space(bpo, usedp, compp, uncompp));

	sra.spa = dmu_objset_spa(bpo->bpo_os);
	sra.mintxg = mintxg;
	sra.maxtxg = maxtxg;

	err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
	*usedp = sra.used;
	*compp = sra.comp;
	*uncompp = sra.uncomp;
	return (err);
}

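/*
 * Illustrative sketch, not part of the original file: e.g. measuring
 * the space born after a given txg.  "snap_txg" is a hypothetical value.
 */
#if 0
	uint64_t used, comp, uncomp;

	VERIFY3U(0, ==, bpobj_space_range(bpo, snap_txg, UINT64_MAX,
	    &used, &comp, &uncomp));
#endif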