space_map.c revision 273341
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * The data for a given space map can be kept in blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);

/*
 * Load the space map from disk into the specified range tree. Segments
 * of maptype are added to the range tree; all other segment types are
 * removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
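/*
 * A minimal usage sketch (the msp field names below follow the metaslab
 * code, which is the primary caller; see metaslab.c): with the map's
 * lock held and an empty range tree, pull in the on-disk free segments:
 *
 *	ASSERT(MUTEX_HELD(msp->ms_lock));
 *	error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
 *
 * On failure the range tree has been vacated, so the caller may simply
 * retry or propagate the error.
 */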
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end, space;
	int error = 0;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	end = space_map_length(sm);
	space = space_map_allocated(sm);

	VERIFY0(range_tree_space(rt));

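	/*
	 * For an SM_FREE load, start with the entire map range marked
	 * free; allocated segments are carved out of it below, so the
	 * expected total is the complement of the allocated space.
	 */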
	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	mutex_exit(sm->sm_lock);
	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), bufsize,
		    end - bufsize);
	}
	mutex_enter(sm->sm_lock);

	for (offset = 0; offset < end; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu  offset=%llx  size=%llx\n",
		    space_map_object(sm), offset, size);

		mutex_exit(sm->sm_lock);
		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		mutex_enter(sm->sm_lock);
		if (error != 0)
			break;

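		/*
		 * Decode each one-word entry into an offset and length
		 * (in bytes) and apply it to the range tree.
		 */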
		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end; entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))		/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			if (SM_TYPE_DECODE(e) == maptype) {
				VERIFY3U(range_tree_space(rt) + size, <=,
				    sm->sm_size);
				range_tree_add(rt, offset, size);
			} else {
				range_tree_remove(rt, offset, size);
			}
		}
	}

	if (error == 0)
		VERIFY3U(range_tree_space(rt), ==, space);
	else
		range_tree_vacate(rt, NULL, NULL);

	zio_buf_free(entry_map, bufsize);
	return (error);
}

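/*
 * Reset the on-disk histogram, if this space map has one (i.e. if its
 * bonus buffer is large enough to hold a space_map_phys_t).
 */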
void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));

	/*
	 * Transfer the contents of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket:
		 * the calculation below would normalize this to
		 * 5 << (44 - 31 - 9), i.e. 5 * 2^4 = 80.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

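/*
 * Compute the number of one-word entries needed to represent the given
 * range tree on disk, including the leading debug entry.
 */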
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space maps always have a debug entry, so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write it out. Note that a
	 * single segment may require multiple entries, because an entry
	 * can describe a run of at most SM_RUN_MAX units.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}

/*
 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(MUTEX_HELD(rt->rt_lock));
	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number, but it is maintained for
	 * backwards compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_space(rt) == 0) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

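	/*
	 * Each write begins with a debug entry recording the action
	 * (maptype), the current sync pass, and the txg;
	 * space_map_entries() accounts for this entry in its total.
	 */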
	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

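			/*
			 * The entry buffer is full; write out one
			 * block's worth of entries and start refilling
			 * it from the beginning.
			 */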
			if (entry == entry_map_end) {
				mutex_exit(rt->rt_lock);
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				mutex_enter(rt->rt_lock);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

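	/* Write out any remaining partial block of entries. */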
	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		mutex_exit(rt->rt_lock);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		mutex_enter(rt->rt_lock);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}

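/*
 * Hold the bonus buffer for the space map object and initialize
 * sm_blksz and sm_phys from it.
 */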
static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

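/*
 * Open the given space map object, which describes the region
 * [start, start + size) at a granularity of 1 << shift bytes, and
 * return it in *smp.
 */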
int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_lock = lp;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != space_map_blksz) {
		zfs_dbgmsg("txg %llu, spa %s, reallocating: "
		    "old bonus %u, old blocksz %u", dmu_tx_get_txg(tx),
		    spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the space map is reallocated, its histogram
		 * will be reset.  Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	ASSERT(MUTEX_HELD(sm->sm_lock));

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

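/*
 * Allocate a new space map object. If the pool supports the space map
 * histogram feature, size the bonus buffer to hold a full
 * space_map_phys_t and bump the feature's refcount; otherwise use the
 * original (V0) bonus size.
 */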
uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os,
	    DMU_OT_SPACE_MAP, space_map_blksz,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

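/*
 * Free the space map object and, if this map was using the histogram
 * feature (as indicated by its bonus size), drop the feature refcount
 * taken in space_map_alloc().
 */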
void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	spa_t *spa;

	if (sm == NULL)
		return;

	spa = dmu_objset_spa(sm->sm_os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		dmu_object_info_from_db(sm->sm_dbuf, &doi);
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			VERIFY(spa_feature_is_active(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM));
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
544