space_map.c revision 332547
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry.
 */
int
space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t bufsize, size, offset, end;
	int error = 0;

	end = space_map_length(sm);

	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	if (end > bufsize) {
		dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
		    end - bufsize, ZIO_PRIORITY_SYNC_READ);
	}

	for (offset = 0; offset < end && error == 0; offset += bufsize) {
		size = MIN(end - offset, bufsize);
		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY(size != 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
		    entry_map, DMU_READ_PREFETCH);
		if (error != 0)
			break;

		entry_map_end = entry_map + (size / sizeof (uint64_t));
		for (entry = entry_map; entry < entry_map_end && error == 0;
		    entry++) {
			uint64_t e = *entry;
			uint64_t offset, size;

			if (SM_DEBUG_DECODE(e))	/* Skip debug entries */
				continue;

			offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
			VERIFY3U(offset, >=, sm->sm_start);
			VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
			error = callback(SM_TYPE_DECODE(e), offset, size, arg);
		}
	}

	zio_buf_free(entry_map, bufsize);
	return (error);
}
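
/*
 * Illustrative sketch, not part of the original file: a minimal sm_cb_t
 * callback showing the contract space_map_iterate() expects. The names
 * space_map_tally_t and space_map_tally_cb are hypothetical. Returning
 * non-zero from the callback stops the iteration and is propagated back
 * to the caller. Compiled out on purpose.
 */
#if 0
typedef struct space_map_tally {
	uint64_t	smt_alloc;	/* bytes seen in SM_ALLOC entries */
	uint64_t	smt_free;	/* bytes seen in SM_FREE entries */
} space_map_tally_t;

static int
space_map_tally_cb(maptype_t type, uint64_t offset, uint64_t size, void *arg)
{
	space_map_tally_t *smt = arg;

	if (type == SM_ALLOC)
		smt->smt_alloc += size;
	else
		smt->smt_free += size;

	return (0);	/* keep iterating */
}

/* Usage: error = space_map_iterate(sm, space_map_tally_cb, &smt); */
#endif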

/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsize, len;
	uint64_t *entry_map;
	int error = 0;

	len = space_map_length(sm);
	bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	entry_map = zio_buf_alloc(bufsize);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Since we can't move the starting offset of the space map
	 * (e.g. there are references on-disk pointing to it), we destroy
	 * its entries incrementally starting from the end.
	 *
	 * The logic that follows is basically the same as the one used
	 * in space_map_iterate() but it traverses the space map
	 * backwards:
	 *
	 * 1] We figure out the size of the buffer that we want to use
	 *    to read the on-disk space map entries.
	 * 2] We figure out the offset at the end of the space map where
	 *    we will start reading entries into our buffer.
	 * 3] We read the on-disk entries into the buffer.
	 * 4] We iterate over the entries from end to beginning calling
	 *    the callback function on each one. As we move from entry
	 *    to entry we decrease the size of the space map, effectively
	 *    deleting each entry.
	 * 5] If there are no more entries in the space map or the
	 *    callback returns a value other than 0, we stop iterating
	 *    over the space map. If there are entries remaining and
	 *    the callback returned 0, we go back to step [1].
	 */
	uint64_t offset = 0, size = 0;
	while (len > 0 && error == 0) {
		size = MIN(bufsize, len);

		VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
		VERIFY3U(size, >, 0);
		ASSERT3U(sm->sm_blksz, !=, 0);

		offset = len - size;

		IMPLY(bufsize > len, offset == 0);
		IMPLY(bufsize == len, offset == 0);
		IMPLY(bufsize < len, offset > 0);

		EQUIV(size == len, offset == 0);
		IMPLY(size < len, bufsize < len);

		dprintf("object=%llu offset=%llx size=%llx\n",
		    space_map_object(sm), offset, size);

		error = dmu_read(sm->sm_os, space_map_object(sm),
		    offset, size, entry_map, DMU_READ_PREFETCH);
		if (error != 0)
			break;

		uint64_t num_entries = size / sizeof (uint64_t);

		ASSERT3U(num_entries, >, 0);

		while (num_entries > 0) {
			uint64_t e, entry_offset, entry_size;
			maptype_t type;

			e = entry_map[num_entries - 1];

			ASSERT3U(num_entries, >, 0);
			ASSERT0(error);

			if (SM_DEBUG_DECODE(e)) {
				sm->sm_phys->smp_objsize -= sizeof (uint64_t);
				space_map_update(sm);
				len -= sizeof (uint64_t);
				num_entries--;
				continue;
			}

			type = SM_TYPE_DECODE(e);
			entry_offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
			    sm->sm_start;
			entry_size = SM_RUN_DECODE(e) << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_size, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset + entry_size, <=,
			    sm->sm_start + sm->sm_size);

			error = callback(type, entry_offset, entry_size, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_size;
			else
				sm->sm_phys->smp_alloc += entry_size;

			sm->sm_phys->smp_objsize -= sizeof (uint64_t);
			space_map_update(sm);
			len -= sizeof (uint64_t);
			num_entries--;
		}
		IMPLY(error == 0, num_entries == 0);
		EQUIV(offset == 0 && error == 0, len == 0 && num_entries == 0);
	}

	if (len == 0) {
		ASSERT0(error);
		ASSERT0(offset);
		ASSERT0(sm->sm_length);
		ASSERT0(sm->sm_phys->smp_objsize);
		ASSERT0(sm->sm_alloc);
	}

	zio_buf_free(entry_map, bufsize);
	return (error);
}
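
/*
 * Illustrative sketch, not part of the original file: a callback for
 * space_map_incremental_destroy() that stops after processing a fixed
 * budget of bytes, so a large space map can be destroyed across several
 * transaction groups. The names destroy_budget_t and space_map_budget_cb,
 * and the use of EINTR as the "stop" value, are hypothetical; any non-zero
 * return halts the loop and the remaining entries stay on disk for the
 * next pass. Compiled out on purpose.
 */
#if 0
typedef struct destroy_budget {
	uint64_t	db_remaining;	/* bytes we may still process */
} destroy_budget_t;

static int
space_map_budget_cb(maptype_t type, uint64_t offset, uint64_t size, void *arg)
{
	destroy_budget_t *db = arg;

	if (size > db->db_remaining)
		return (SET_ERROR(EINTR));	/* resume in a later txg */
	db->db_remaining -= size;

	/* ... actually release [offset, offset + size) here ... */

	return (0);
}
#endif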

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
    void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, offset, size);
	} else {
		range_tree_remove(smla->smla_rt, offset, size);
	}

	return (0);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	uint64_t space;
	int err;
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));
	space = space_map_allocated(sm);

	if (maptype == SM_FREE) {
		range_tree_add(rt, sm->sm_start, sm->sm_size);
		space = sm->sm_size - space;
	}

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	err = space_map_iterate(sm, space_map_load_callback, &smla);

	if (err == 0) {
		VERIFY3U(range_tree_space(rt), ==, space);
	} else {
		range_tree_vacate(rt, NULL, NULL);
	}

	return (err);
}
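
/*
 * Illustrative sketch, not part of the original file: typical use of
 * space_map_load(). With SM_FREE, the range tree is seeded with the whole
 * [sm_start, sm_start + sm_size) region and the log is then replayed:
 * ALLOC entries punch holes in it, FREE entries fill them back in. The
 * function name is hypothetical, and rt is assumed to be an empty range
 * tree supplied by the caller. Compiled out on purpose.
 */
#if 0
static int
space_map_load_free_sketch(space_map_t *sm, range_tree_t *rt)
{
	int error;

	error = space_map_load(sm, rt, SM_FREE);
	if (error != 0) {
		/* rt was vacated by space_map_load() before returning */
		return (error);
	}

	/* free space is everything not accounted as allocated */
	ASSERT3U(range_tree_space(rt), ==,
	    sm->sm_size - space_map_allocated(sm));
	return (0);
}
#endif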

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets ranging
	 * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example, given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assume
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to 5 * 2^4 (80).
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, entries;

	/*
	 * All space_maps always have a debug entry so account for it here.
	 */
	entries = 1;

	/*
	 * Traverse the range tree and calculate the number of space map
	 * entries that would be required to write out the range tree.
	 */
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		entries += howmany(size, SM_RUN_MAX);
	}
	return (entries);
}
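
/*
 * Worked example (added; not in the original source): with sm_shift = 9
 * (512-byte sectors), a single 32MB segment spans 2^25 >> 9 = 65536
 * sectors. A space map entry encodes a run of at most SM_RUN_MAX sectors
 * (2^15 = 32768, given the 15-bit run field), so the segment requires
 * howmany(65536, 32768) = 2 entries, in addition to the one debug entry
 * counted above for the whole space map.
 */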

void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	avl_tree_t *t = &rt->rt_root;
	range_seg_t *rs;
	uint64_t size, total, rt_space, nodes;
	uint64_t *entry, *entry_map, *entry_map_end;
	uint64_t expected_entries, actual_entries = 1;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	expected_entries = space_map_entries(sm, rt);

	entry_map = zio_buf_alloc(sm->sm_blksz);
	entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
	entry = entry_map;

	*entry++ = SM_DEBUG_ENCODE(1) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	total = 0;
	nodes = avl_numnodes(&rt->rt_root);
	rt_space = range_tree_space(rt);
	for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
		uint64_t start;

		size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
		start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

		total += size << sm->sm_shift;

		while (size != 0) {
			uint64_t run_len;

			run_len = MIN(size, SM_RUN_MAX);

			if (entry == entry_map_end) {
				dmu_write(os, space_map_object(sm),
				    sm->sm_phys->smp_objsize, sm->sm_blksz,
				    entry_map, tx);
				sm->sm_phys->smp_objsize += sm->sm_blksz;
				entry = entry_map;
			}

			*entry++ = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);

			start += run_len;
			size -= run_len;
			actual_entries++;
		}
	}

	if (entry != entry_map) {
		size = (entry - entry_map) * sizeof (uint64_t);
		dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
		    size, entry_map, tx);
		sm->sm_phys->smp_objsize += size;
	}
	ASSERT3U(expected_entries, ==, actual_entries);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
	VERIFY3U(range_tree_space(rt), ==, total);

	zio_buf_free(entry_map, sm->sm_blksz);
}
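
/*
 * Illustrative sketch, not part of the original file: round-tripping a
 * single one-word entry through the encode/decode macros used by
 * space_map_write() and space_map_iterate(). The function name is
 * hypothetical. Compiled out on purpose.
 */
#if 0
static void
space_map_entry_roundtrip_example(void)
{
	uint64_t e = SM_OFFSET_ENCODE(1000) | SM_TYPE_ENCODE(SM_ALLOC) |
	    SM_RUN_ENCODE(16);

	ASSERT0(SM_DEBUG_DECODE(e));		/* not a debug entry */
	ASSERT3U(SM_OFFSET_DECODE(e), ==, 1000);
	ASSERT3U(SM_TYPE_DECODE(e), ==, SM_ALLOC);
	ASSERT3U(SM_RUN_DECODE(e), ==, 16);	/* run is stored as len - 1 */
}
#endif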

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}

	*smp = sm;

	return (0);
}

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_objsize = 0;
	sm->sm_phys->smp_alloc = 0;
}
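
/*
 * Illustrative sketch, not part of the original file: the typical
 * open/use/close lifecycle for a space map object. The function name and
 * the (start, size, shift) values - here a 1GB region of 512-byte
 * sectors - are hypothetical. Compiled out on purpose.
 */
#if 0
static int
space_map_lifecycle_example(objset_t *os, uint64_t smobj)
{
	space_map_t *sm = NULL;
	int error;

	error = space_map_open(&sm, os, smobj, 0, 1ULL << 30, 9);
	if (error != 0)
		return (error);

	/* ... space_map_load() / space_map_write() against sm ... */

	space_map_close(sm);
	return (0);
}
#endif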

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
	if (sm == NULL)
		return;

	sm->sm_alloc = sm->sm_phys->smp_alloc;
	sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc(os, DMU_OT_SPACE_MAP, blocksize,
	    DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	ASSERT(sm->sm_dbuf != NULL);
	return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
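
/*
 * Illustrative sketch, not part of the original file: the invariant tying
 * the accessors above together. Until space_map_update() runs, sm_alloc
 * and sm_length lag smp_alloc and smp_objsize, and the delta is the
 * not-yet-synced difference. The function name is hypothetical. Compiled
 * out on purpose.
 */
#if 0
static void
space_map_accounting_example(space_map_t *sm)
{
	int64_t delta = space_map_alloc_delta(sm);

	/* on-disk (synced) allocation plus the syncing delta */
	ASSERT3S(space_map_allocated(sm) + delta, ==,
	    sm->sm_phys->smp_alloc);

	/* after space_map_update(), the delta collapses to zero */
	space_map_update(sm);
	ASSERT0(space_map_alloc_delta(sm));
}
#endif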