/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Pawel Jakub Dawidek under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sbin/hastd/activemap.c 330449 2018-03-05 07:26:05Z eadler $");

#include <sys/param.h>	/* powerof2() */
#include <sys/queue.h>

#include <bitstring.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <pjdlog.h>

#include "activemap.h"

#ifndef	PJDLOG_ASSERT
#include <assert.h>
#define	PJDLOG_ASSERT(...)	assert(__VA_ARGS__)
#endif

#define	ACTIVEMAP_MAGIC	0xac71e4
struct activemap {
	int		 am_magic;	/* Magic value. */
	off_t		 am_mediasize;	/* Media size in bytes. */
	uint32_t	 am_extentsize;	/* Extent size in bytes,
					   must be power of 2. */
	uint8_t		 am_extentshift;/* 2 ^ extentbits == extentsize */
	int		 am_nextents;	/* Number of extents. */
	size_t		 am_mapsize;	/* Bitmap size in bytes. */
	uint16_t	*am_memtab;	/* An array that holds number of pending
					   writes per extent. */
	bitstr_t	*am_diskmap;	/* On-disk bitmap of dirty extents. */
	bitstr_t	*am_memmap;	/* In-memory bitmap of dirty extents. */
	size_t		 am_diskmapsize; /* Map size rounded up to sector size. */
	uint64_t	 am_ndirty;	/* Number of dirty regions. */
	bitstr_t	*am_syncmap;	/* Bitmap of extents to sync. */
	off_t		 am_syncoff;	/* Next synchronization offset. */
	TAILQ_HEAD(skeepdirty, keepdirty) am_keepdirty; /* List of extents that
					   we keep dirty to reduce bitmap
					   updates. */
	int		 am_nkeepdirty;	/* Number of am_keepdirty elements. */
	int		 am_nkeepdirty_limit; /* Maximum number of am_keepdirty
					         elements. */
};

struct keepdirty {
	int	kd_extent;
	TAILQ_ENTRY(keepdirty) kd_next;
};

/*
 * Helper function taken from sys/systm.h to calculate extentshift.
 */
static uint32_t
bitcount32(uint32_t x)
{

	x = (x & 0x55555555) + ((x & 0xaaaaaaaa) >> 1);
	x = (x & 0x33333333) + ((x & 0xcccccccc) >> 2);
	x = (x + (x >> 4)) & 0x0f0f0f0f;
	x = (x + (x >> 8));
	x = (x + (x >> 16)) & 0x000000ff;
	return (x);
}

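/*
 * Worked example (values are purely illustrative): for a 2 MiB extent size
 * (0x200000), bitcount32(0x200000 - 1) == 21, so am_extentshift becomes 21
 * and off2ext() below reduces to offset >> 21.
 */
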
static __inline int
off2ext(const struct activemap *amp, off_t offset)
{
	int extent;

	PJDLOG_ASSERT(offset >= 0 && offset < amp->am_mediasize);
	extent = (offset >> amp->am_extentshift);
	PJDLOG_ASSERT(extent >= 0 && extent < amp->am_nextents);
	return (extent);
}

static __inline off_t
ext2off(const struct activemap *amp, int extent)
{
	off_t offset;

	PJDLOG_ASSERT(extent >= 0 && extent < amp->am_nextents);
	offset = ((off_t)extent << amp->am_extentshift);
	PJDLOG_ASSERT(offset >= 0 && offset < amp->am_mediasize);
	return (offset);
}

/*
 * Function calculates number of requests needed to synchronize the given
 * extent.
 */
static __inline int
ext2reqs(const struct activemap *amp, int ext)
{
	off_t left;

	if (ext < amp->am_nextents - 1)
		return (((amp->am_extentsize - 1) / MAXPHYS) + 1);

	PJDLOG_ASSERT(ext == amp->am_nextents - 1);
	left = amp->am_mediasize % amp->am_extentsize;
	if (left == 0)
		left = amp->am_extentsize;
	return (((left - 1) / MAXPHYS) + 1);
}
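
/*
 * For example, with a 128 kB MAXPHYS and a 2 MiB extent size (both values
 * chosen only for illustration), a full extent requires 16 requests; the
 * last extent covers only the remainder of the media and may need fewer.
 */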

/*
 * Initialize activemap structure and allocate memory for internal needs.
 * Function returns 0 on success and -1 if any of the allocations failed.
 */
int
activemap_init(struct activemap **ampp, uint64_t mediasize, uint32_t extentsize,
    uint32_t sectorsize, uint32_t keepdirty)
{
	struct activemap *amp;

	PJDLOG_ASSERT(ampp != NULL);
	PJDLOG_ASSERT(mediasize > 0);
	PJDLOG_ASSERT(extentsize > 0);
	PJDLOG_ASSERT(powerof2(extentsize));
	PJDLOG_ASSERT(sectorsize > 0);
	PJDLOG_ASSERT(powerof2(sectorsize));
	PJDLOG_ASSERT(keepdirty > 0);

	amp = malloc(sizeof(*amp));
	if (amp == NULL)
		return (-1);

	amp->am_mediasize = mediasize;
	amp->am_nkeepdirty_limit = keepdirty;
	amp->am_extentsize = extentsize;
	amp->am_extentshift = bitcount32(extentsize - 1);
	amp->am_nextents = ((mediasize - 1) / extentsize) + 1;
	amp->am_mapsize = bitstr_size(amp->am_nextents);
	amp->am_diskmapsize = roundup2(amp->am_mapsize, sectorsize);
	amp->am_ndirty = 0;
	amp->am_syncoff = -2;
	TAILQ_INIT(&amp->am_keepdirty);
	amp->am_nkeepdirty = 0;

	amp->am_memtab = calloc(amp->am_nextents, sizeof(amp->am_memtab[0]));
	amp->am_diskmap = calloc(1, amp->am_diskmapsize);
	amp->am_memmap = bit_alloc(amp->am_nextents);
	amp->am_syncmap = bit_alloc(amp->am_nextents);

	/*
	 * Check to see if any of the allocations above failed.
	 */
	if (amp->am_memtab == NULL || amp->am_diskmap == NULL ||
	    amp->am_memmap == NULL || amp->am_syncmap == NULL) {
		if (amp->am_memtab != NULL)
			free(amp->am_memtab);
		if (amp->am_diskmap != NULL)
			free(amp->am_diskmap);
		if (amp->am_memmap != NULL)
			free(amp->am_memmap);
		if (amp->am_syncmap != NULL)
			free(amp->am_syncmap);
		amp->am_magic = 0;
		free(amp);
		errno = ENOMEM;
		return (-1);
	}

	amp->am_magic = ACTIVEMAP_MAGIC;
	*ampp = amp;

	return (0);
}
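
/*
 * Example use (a sketch only; the sizes below are arbitrary and the error
 * handling is purely illustrative):
 *
 *	struct activemap *amp;
 *
 *	if (activemap_init(&amp, mediasize, 2 * 1024 * 1024, 512, 64) == -1)
 *		err(1, "activemap_init() failed");
 */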

static struct keepdirty *
keepdirty_find(struct activemap *amp, int extent)
{
	struct keepdirty *kd;

	TAILQ_FOREACH(kd, &amp->am_keepdirty, kd_next) {
		if (kd->kd_extent == extent)
			break;
	}
	return (kd);
}

/*
 * Remember that the given extent should stay marked dirty on disk.
 * Returns true if the extent was not being kept dirty before, which means
 * the on-disk bitmap has to be updated.
 */
static bool
keepdirty_add(struct activemap *amp, int extent)
{
	struct keepdirty *kd;

	kd = keepdirty_find(amp, extent);
	if (kd != NULL) {
		/*
		 * The element is already on the list,
		 * just move it to the front.
		 */
		TAILQ_REMOVE(&amp->am_keepdirty, kd, kd_next);
		TAILQ_INSERT_HEAD(&amp->am_keepdirty, kd, kd_next);
		return (false);
	}
	/*
	 * Add a new element, but first remove the least recently used one
	 * if we have too many.
	 */
	if (amp->am_nkeepdirty >= amp->am_nkeepdirty_limit) {
		kd = TAILQ_LAST(&amp->am_keepdirty, skeepdirty);
		PJDLOG_ASSERT(kd != NULL);
		TAILQ_REMOVE(&amp->am_keepdirty, kd, kd_next);
		amp->am_nkeepdirty--;
		PJDLOG_ASSERT(amp->am_nkeepdirty > 0);
	}
	if (kd == NULL)
		kd = malloc(sizeof(*kd));
	/* We can ignore allocation failure. */
	if (kd != NULL) {
		kd->kd_extent = extent;
		amp->am_nkeepdirty++;
		TAILQ_INSERT_HEAD(&amp->am_keepdirty, kd, kd_next);
	}

	return (true);
}

static void
keepdirty_fill(struct activemap *amp)
{
	struct keepdirty *kd;

	TAILQ_FOREACH(kd, &amp->am_keepdirty, kd_next)
		bit_set(amp->am_diskmap, kd->kd_extent);
}

static void
keepdirty_free(struct activemap *amp)
{
	struct keepdirty *kd;

	while ((kd = TAILQ_FIRST(&amp->am_keepdirty)) != NULL) {
		TAILQ_REMOVE(&amp->am_keepdirty, kd, kd_next);
		amp->am_nkeepdirty--;
		free(kd);
	}
	PJDLOG_ASSERT(amp->am_nkeepdirty == 0);
}

/*
 * Function frees resources allocated by activemap_init() function.
 */
void
activemap_free(struct activemap *amp)
{

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	amp->am_magic = 0;

	keepdirty_free(amp);
	free(amp->am_memtab);
	free(amp->am_diskmap);
	free(amp->am_memmap);
	free(amp->am_syncmap);
}

/*
 * Function should be called before we handle write requests. It updates
 * internal structures and returns true if on-disk metadata should be updated.
 */
bool
activemap_write_start(struct activemap *amp, off_t offset, off_t length)
{
	bool modified;
	off_t end;
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);
	PJDLOG_ASSERT(length > 0);

	modified = false;
	end = offset + length - 1;

	for (ext = off2ext(amp, offset); ext <= off2ext(amp, end); ext++) {
		/*
		 * If the number of pending writes is increased from 0,
		 * we have to mark the extent as dirty also in on-disk bitmap.
		 * By returning true we inform the caller that on-disk bitmap
		 * was modified and has to be flushed to disk.
		 */
		if (amp->am_memtab[ext]++ == 0) {
			PJDLOG_ASSERT(!bit_test(amp->am_memmap, ext));
			bit_set(amp->am_memmap, ext);
			amp->am_ndirty++;
		}
		if (keepdirty_add(amp, ext))
			modified = true;
	}

	return (modified);
}

/*
 * Function should be called after receiving write confirmation. It updates
 * internal structures and returns true if on-disk metadata should be updated.
 */
bool
activemap_write_complete(struct activemap *amp, off_t offset, off_t length)
{
	bool modified;
	off_t end;
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);
	PJDLOG_ASSERT(length > 0);

	modified = false;
	end = offset + length - 1;

	for (ext = off2ext(amp, offset); ext <= off2ext(amp, end); ext++) {
		/*
		 * If the number of pending writes goes down to 0, we have to
		 * mark the extent as clean also in on-disk bitmap.
		 * By returning true we inform the caller that on-disk bitmap
		 * was modified and has to be flushed to disk.
		 */
		PJDLOG_ASSERT(amp->am_memtab[ext] > 0);
		PJDLOG_ASSERT(bit_test(amp->am_memmap, ext));
		if (--amp->am_memtab[ext] == 0) {
			bit_clear(amp->am_memmap, ext);
			amp->am_ndirty--;
			if (keepdirty_find(amp, ext) == NULL)
				modified = true;
		}
	}

	return (modified);
}
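
/*
 * Typical write path (a sketch only; error handling and the actual flush of
 * the bitmap are omitted):
 *
 *	if (activemap_write_start(amp, offset, length))
 *		... write out activemap_bitmap() before issuing the write ...
 *	... perform the data write ...
 *	if (activemap_write_complete(amp, offset, length))
 *		... the on-disk bitmap may be flushed again, possibly lazily ...
 */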

/*
 * Function should be called after finishing synchronization of one extent.
 * It returns true if on-disk metadata should be updated.
 */
bool
activemap_extent_complete(struct activemap *amp, int extent)
{
	bool modified;
	int reqs;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);
	PJDLOG_ASSERT(extent >= 0 && extent < amp->am_nextents);

	modified = false;

	reqs = ext2reqs(amp, extent);
	PJDLOG_ASSERT(amp->am_memtab[extent] >= reqs);
	amp->am_memtab[extent] -= reqs;
	PJDLOG_ASSERT(bit_test(amp->am_memmap, extent));
	if (amp->am_memtab[extent] == 0) {
		bit_clear(amp->am_memmap, extent);
		amp->am_ndirty--;
		modified = true;
	}

	return (modified);
}

/*
 * Function returns number of dirty regions.
 */
uint64_t
activemap_ndirty(const struct activemap *amp)
{

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	return (amp->am_ndirty);
}

/*
 * Function compares the on-disk and in-memory bitmaps and returns true if
 * they differ and the on-disk bitmap should be flushed to the disk.
 */
bool
activemap_differ(const struct activemap *amp)
{

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	return (memcmp(amp->am_diskmap, amp->am_memmap,
	    amp->am_mapsize) != 0);
}

/*
 * Function returns number of bytes used by bitmap.
 */
size_t
activemap_size(const struct activemap *amp)
{

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	return (amp->am_mapsize);
}

/*
 * Function returns number of bytes needed for storing on-disk bitmap.
 * This is the same as activemap_size(), but rounded up to sector size.
 */
size_t
activemap_ondisk_size(const struct activemap *amp)
{

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	return (amp->am_diskmapsize);
}

/*
 * Function copies the given buffer read from disk to the internal bitmap.
 */
void
activemap_copyin(struct activemap *amp, const unsigned char *buf, size_t size)
{
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);
	PJDLOG_ASSERT(size >= amp->am_mapsize);

	memcpy(amp->am_diskmap, buf, amp->am_mapsize);
	memcpy(amp->am_memmap, buf, amp->am_mapsize);
	memcpy(amp->am_syncmap, buf, amp->am_mapsize);

	bit_ffs(amp->am_memmap, amp->am_nextents, &ext);
	if (ext == -1) {
		/* There are no dirty extents, so we can leave now. */
		return;
	}
	/*
	 * Set synchronization offset to the first dirty extent.
	 */
	activemap_sync_rewind(amp);
	/*
	 * We have dirty extents and we want them to stay that way until
	 * we synchronize, so we set number of pending writes to number
	 * of requests needed to synchronize one extent.
	 */
	amp->am_ndirty = 0;
	for (; ext < amp->am_nextents; ext++) {
		if (bit_test(amp->am_memmap, ext)) {
			amp->am_memtab[ext] = ext2reqs(amp, ext);
			amp->am_ndirty++;
		}
	}
}

/*
 * Function merges the given bitmap with existing one.
 */
void
activemap_merge(struct activemap *amp, const unsigned char *buf, size_t size)
{
	bitstr_t *remmap = __DECONST(bitstr_t *, buf);
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);
	PJDLOG_ASSERT(size >= amp->am_mapsize);

	bit_ffs(remmap, amp->am_nextents, &ext);
	if (ext == -1) {
		/* There are no dirty extents, so we can leave now. */
		return;
	}
	/*
	 * We have dirty extents and we want them to stay that way until
	 * we synchronize, so we set number of pending writes to number
	 * of requests needed to synchronize one extent.
	 */
	for (; ext < amp->am_nextents; ext++) {
		/* Local extent already dirty. */
		if (bit_test(amp->am_syncmap, ext))
			continue;
		/* Remote extent isn't dirty. */
		if (!bit_test(remmap, ext))
			continue;
		bit_set(amp->am_syncmap, ext);
		bit_set(amp->am_memmap, ext);
		bit_set(amp->am_diskmap, ext);
		if (amp->am_memtab[ext] == 0)
			amp->am_ndirty++;
		amp->am_memtab[ext] = ext2reqs(amp, ext);
	}
	/*
	 * Set synchronization offset to the first dirty extent.
	 */
	activemap_sync_rewind(amp);
}

/*
 * Function returns pointer to internal bitmap that should be written to disk.
 */
const unsigned char *
activemap_bitmap(struct activemap *amp, size_t *sizep)
{

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	if (sizep != NULL)
		*sizep = amp->am_diskmapsize;
	memcpy(amp->am_diskmap, amp->am_memmap, amp->am_mapsize);
	keepdirty_fill(amp);
	return ((const unsigned char *)amp->am_diskmap);
}
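
/*
 * Example flush (a sketch; fd and METADATA_OFFSET are illustrative and not
 * part of this API):
 *
 *	if (activemap_differ(amp)) {
 *		buf = activemap_bitmap(amp, &size);
 *		(void)pwrite(fd, buf, size, METADATA_OFFSET);
 *	}
 */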

/*
 * Function calculates size needed to store bitmap on disk.
 */
size_t
activemap_calc_ondisk_size(uint64_t mediasize, uint32_t extentsize,
    uint32_t sectorsize)
{
	uint64_t nextents, mapsize;

	PJDLOG_ASSERT(mediasize > 0);
	PJDLOG_ASSERT(extentsize > 0);
	PJDLOG_ASSERT(powerof2(extentsize));
	PJDLOG_ASSERT(sectorsize > 0);
	PJDLOG_ASSERT(powerof2(sectorsize));

	nextents = ((mediasize - 1) / extentsize) + 1;
	mapsize = bitstr_size(nextents);
	return (roundup2(mapsize, sectorsize));
}
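
/*
 * Worked example (illustrative numbers): a 10 GiB media with 2 MiB extents
 * gives 5120 extents, so the bitmap occupies 640 bytes, which rounds up to
 * 1024 bytes with a 512-byte sector size.
 */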

/*
 * Set synchronization offset to the first dirty extent.
 */
void
activemap_sync_rewind(struct activemap *amp)
{
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	bit_ffs(amp->am_syncmap, amp->am_nextents, &ext);
	if (ext == -1) {
		/* There are no extents to synchronize. */
		amp->am_syncoff = -2;
		return;
	}
	/*
	 * Mark that we want to start synchronization from the beginning.
	 */
	amp->am_syncoff = -1;
}

/*
 * Return next offset of where we should synchronize.
 */
off_t
activemap_sync_offset(struct activemap *amp, off_t *lengthp, int *syncextp)
{
	off_t syncoff, left;
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);
	PJDLOG_ASSERT(lengthp != NULL);
	PJDLOG_ASSERT(syncextp != NULL);

	*syncextp = -1;

	if (amp->am_syncoff == -2)
		return (-1);

	if (amp->am_syncoff >= 0 &&
	    (amp->am_syncoff + MAXPHYS >= amp->am_mediasize ||
	     off2ext(amp, amp->am_syncoff) !=
	     off2ext(amp, amp->am_syncoff + MAXPHYS))) {
		/*
		 * We are about to change extent, so mark previous one as clean.
		 */
		ext = off2ext(amp, amp->am_syncoff);
		bit_clear(amp->am_syncmap, ext);
		*syncextp = ext;
		amp->am_syncoff = -1;
	}

	if (amp->am_syncoff == -1) {
		/*
		 * Let's find first extent to synchronize.
		 */
		bit_ffs(amp->am_syncmap, amp->am_nextents, &ext);
		if (ext == -1) {
			amp->am_syncoff = -2;
			return (-1);
		}
		amp->am_syncoff = ext2off(amp, ext);
	} else {
		/*
		 * We don't change extent, so just increase offset.
		 */
		amp->am_syncoff += MAXPHYS;
		if (amp->am_syncoff >= amp->am_mediasize) {
			amp->am_syncoff = -2;
			return (-1);
		}
	}

	syncoff = amp->am_syncoff;
	left = ext2off(amp, off2ext(amp, syncoff)) +
	    amp->am_extentsize - syncoff;
	if (syncoff + left > amp->am_mediasize)
		left = amp->am_mediasize - syncoff;
	if (left > MAXPHYS)
		left = MAXPHYS;

	PJDLOG_ASSERT(left >= 0 && left <= MAXPHYS);
	PJDLOG_ASSERT(syncoff >= 0 && syncoff < amp->am_mediasize);
	PJDLOG_ASSERT(syncoff + left >= 0 &&
	    syncoff + left <= amp->am_mediasize);

	*lengthp = left;
	return (syncoff);
}
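
/*
 * Synchronization loop sketch (illustrative only; the data copy and bitmap
 * flush are placeholders):
 *
 *	activemap_sync_rewind(amp);
 *	while ((offset = activemap_sync_offset(amp, &length, &syncext)) >= 0) {
 *		if (syncext != -1 && activemap_extent_complete(amp, syncext))
 *			... flush the on-disk bitmap ...
 *		... copy length bytes at offset from the good component ...
 *	}
 */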

/*
 * Mark extent(s) containing the given region for synchronization.
 * Most likely one of the components is unavailable.
 */
bool
activemap_need_sync(struct activemap *amp, off_t offset, off_t length)
{
	bool modified;
	off_t end;
	int ext;

	PJDLOG_ASSERT(amp->am_magic == ACTIVEMAP_MAGIC);

	modified = false;
	end = offset + length - 1;

	for (ext = off2ext(amp, offset); ext <= off2ext(amp, end); ext++) {
		if (bit_test(amp->am_syncmap, ext)) {
			/* Already marked for synchronization. */
			PJDLOG_ASSERT(bit_test(amp->am_memmap, ext));
			continue;
		}
		bit_set(amp->am_syncmap, ext);
		if (!bit_test(amp->am_memmap, ext)) {
			bit_set(amp->am_memmap, ext);
			amp->am_ndirty++;
		}
		amp->am_memtab[ext] += ext2reqs(amp, ext);
		modified = true;
	}

	return (modified);
}
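
/*
 * For example, if replicating a write of length bytes at offset to the
 * remote component fails, the caller would invoke activemap_need_sync()
 * for that range and, when it returns true, flush the bitmap so the
 * affected extents get resynchronized later.
 */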

void
activemap_dump(const struct activemap *amp)
{
	int bit;

	printf("M: ");
	for (bit = 0; bit < amp->am_nextents; bit++)
		printf("%d", bit_test(amp->am_memmap, bit) ? 1 : 0);
	printf("\n");
	printf("D: ");
	for (bit = 0; bit < amp->am_nextents; bit++)
		printf("%d", bit_test(amp->am_diskmap, bit) ? 1 : 0);
	printf("\n");
	printf("S: ");
	for (bit = 0; bit < amp->am_nextents; bit++)
		printf("%d", bit_test(amp->am_syncmap, bit) ? 1 : 0);
	printf("\n");
}