/* space_map.c revision 288571 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

SYSCTL_DECL(_vfs_zfs);

/*
 * The data for a given space map can be kept in blocks of any size.
 * Larger blocks entail fewer i/o operations, but they also cause the
 * DMU to keep more data in-core, and also to waste more i/o bandwidth
 * when only a few blocks have changed since the last transaction group.
 */
int space_map_blksz = (1 << 12);
SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map_blksz, CTLFLAG_RDTUN, &space_map_blksz, 0,
    "Maximum block size for space map. Must be a power of 2 and at least 4096.");
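
/*
 * For reference (see the SM_*_ENCODE/DECODE macros in sys/space_map.h,
 * on which the code below relies): each space map entry is a single
 * 64-bit word. A debug entry has bit 63 set and records the action
 * (alloc/free), the sync pass, and the txg in which it was written.
 * A normal entry packs a 47-bit offset (expressed in units of
 * 1 << sm_shift, relative to sm_start), a 1-bit alloc/free type, and a
 * 15-bit run length (stored as length - 1, so a single entry can
 * describe at most SM_RUN_MAX units).
 */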

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 *
 * Note: space_map_load() will drop sm_lock across dmu_read() calls.
 * The caller must be OK with this.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
    uint64_t *entry, *entry_map, *entry_map_end;
    uint64_t bufsize, size, offset, end, space;
    int error = 0;

    ASSERT(MUTEX_HELD(sm->sm_lock));

    end = space_map_length(sm);
    space = space_map_allocated(sm);

    VERIFY0(range_tree_space(rt));

    if (maptype == SM_FREE) {
        range_tree_add(rt, sm->sm_start, sm->sm_size);
        space = sm->sm_size - space;
    }

    bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
    entry_map = zio_buf_alloc(bufsize);

    mutex_exit(sm->sm_lock);
    if (end > bufsize) {
        dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
            end - bufsize, ZIO_PRIORITY_SYNC_READ);
    }
    mutex_enter(sm->sm_lock);

    for (offset = 0; offset < end; offset += bufsize) {
        size = MIN(end - offset, bufsize);
        VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
        VERIFY(size != 0);
        ASSERT3U(sm->sm_blksz, !=, 0);

        dprintf("object=%llu offset=%llx size=%llx\n",
            space_map_object(sm), offset, size);

        mutex_exit(sm->sm_lock);
        error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
            entry_map, DMU_READ_PREFETCH);
        mutex_enter(sm->sm_lock);
        if (error != 0)
            break;

        entry_map_end = entry_map + (size / sizeof (uint64_t));
        for (entry = entry_map; entry < entry_map_end; entry++) {
            uint64_t e = *entry;
            uint64_t offset, size;

            if (SM_DEBUG_DECODE(e))    /* Skip debug entries */
                continue;

            offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
                sm->sm_start;
            size = SM_RUN_DECODE(e) << sm->sm_shift;

            VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
            VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
            VERIFY3U(offset, >=, sm->sm_start);
            VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
            if (SM_TYPE_DECODE(e) == maptype) {
                VERIFY3U(range_tree_space(rt) + size, <=,
                    sm->sm_size);
                range_tree_add(rt, offset, size);
            } else {
                range_tree_remove(rt, offset, size);
            }
        }
    }

    if (error == 0)
        VERIFY3U(range_tree_space(rt), ==, space);
    else
        range_tree_vacate(rt, NULL, NULL);

    zio_buf_free(entry_map, bufsize);
    return (error);
}

void
space_map_histogram_clear(space_map_t *sm)
{
    if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
        return;

    bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
    /*
     * Verify that the in-core range tree does not have any
     * ranges smaller than our sm_shift size.
     */
    for (int i = 0; i < sm->sm_shift; i++) {
        if (rt->rt_histogram[i] != 0)
            return (B_FALSE);
    }
    return (B_TRUE);
}
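
/*
 * Illustrative example for space_map_histogram_add() below: with an
 * sm_shift of 9, range tree bucket 9 (512-byte ranges) feeds space map
 * bucket 0, bucket 10 feeds bucket 1, and so on up to range tree bucket
 * 40, which feeds the last space map bucket (31). Counts in range tree
 * buckets above 40 are scaled down and folded into bucket 31.
 */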

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
    int idx = 0;

    ASSERT(MUTEX_HELD(rt->rt_lock));
    ASSERT(dmu_tx_is_syncing(tx));
    VERIFY3U(space_map_object(sm), !=, 0);

    if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
        return;

    dmu_buf_will_dirty(sm->sm_dbuf, tx);

    ASSERT(space_map_histogram_verify(sm, rt));

    /*
     * Transfer the contents of the range tree histogram to the space
     * map histogram. The space map histogram contains 32 buckets ranging
     * from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
     * however, can represent ranges from 2^0 to 2^63. Since the space
     * map only cares about allocatable blocks (minimum of sm_shift) we
     * can safely ignore all ranges in the range tree smaller than sm_shift.
     */
    for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

        /*
         * Since the largest histogram bucket in the space map is
         * 2^(32+sm_shift-1), we need to normalize the values in
         * the range tree for any bucket larger than that size. For
         * example, given an sm_shift of 9, ranges larger than 2^40
         * would get normalized as if they were 1TB ranges. If the
         * range tree had a count of 5 in the 2^44 (16TB) bucket,
         * the calculation below would normalize this to
         * 5 * 2^4 (i.e., 80).
         */
        ASSERT3U(i, >=, idx + sm->sm_shift);
        sm->sm_phys->smp_histogram[idx] +=
            rt->rt_histogram[i] << (i - idx - sm->sm_shift);

        /*
         * Increment the space map's index as long as we haven't
         * reached the maximum bucket size. Accumulate all ranges
         * larger than the max bucket size into the last bucket.
         */
        if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
            ASSERT3U(idx + sm->sm_shift, ==, i);
            idx++;
            ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
        }
    }
}

uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
    avl_tree_t *t = &rt->rt_root;
    range_seg_t *rs;
    uint64_t size, entries;

    /*
     * All space maps always have a debug entry, so account for it here.
     */
    entries = 1;

    /*
     * Traverse the range tree and calculate the number of space map
     * entries that would be required to write out the range tree.
     */
    for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
        size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
        entries += howmany(size, SM_RUN_MAX);
    }
    return (entries);
}
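
/*
 * Worked example for space_map_entries() above: with an sm_shift of 9,
 * a single 32MB segment covers 32M >> 9 = 65536 units. SM_RUN_MAX is
 * 32768 (the 15-bit run field stores length - 1), so the segment needs
 * howmany(65536, 32768) = 2 entries; together with the mandatory debug
 * entry, the function would return 3.
 */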

/*
 * Note: space_map_write() will drop sm_lock across dmu_write() calls.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    dmu_tx_t *tx)
{
    objset_t *os = sm->sm_os;
    spa_t *spa = dmu_objset_spa(os);
    avl_tree_t *t = &rt->rt_root;
    range_seg_t *rs;
    uint64_t size, total, rt_space, nodes;
    uint64_t *entry, *entry_map, *entry_map_end;
    uint64_t expected_entries, actual_entries = 1;

    ASSERT(MUTEX_HELD(rt->rt_lock));
    ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
    VERIFY3U(space_map_object(sm), !=, 0);
    dmu_buf_will_dirty(sm->sm_dbuf, tx);

    /*
     * This field is no longer necessary since the in-core space map
     * now contains the object number, but it is maintained for backwards
     * compatibility.
     */
    sm->sm_phys->smp_object = sm->sm_object;

    if (range_tree_space(rt) == 0) {
        VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
        return;
    }

    if (maptype == SM_ALLOC)
        sm->sm_phys->smp_alloc += range_tree_space(rt);
    else
        sm->sm_phys->smp_alloc -= range_tree_space(rt);

    expected_entries = space_map_entries(sm, rt);

    entry_map = zio_buf_alloc(sm->sm_blksz);
    entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
    entry = entry_map;

    *entry++ = SM_DEBUG_ENCODE(1) |
        SM_DEBUG_ACTION_ENCODE(maptype) |
        SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
        SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

    total = 0;
    nodes = avl_numnodes(&rt->rt_root);
    rt_space = range_tree_space(rt);
    for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
        uint64_t start;

        size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
        start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;

        total += size << sm->sm_shift;

        while (size != 0) {
            uint64_t run_len;

            run_len = MIN(size, SM_RUN_MAX);

            if (entry == entry_map_end) {
                mutex_exit(rt->rt_lock);
                dmu_write(os, space_map_object(sm),
                    sm->sm_phys->smp_objsize, sm->sm_blksz,
                    entry_map, tx);
                mutex_enter(rt->rt_lock);
                sm->sm_phys->smp_objsize += sm->sm_blksz;
                entry = entry_map;
            }

            *entry++ = SM_OFFSET_ENCODE(start) |
                SM_TYPE_ENCODE(maptype) |
                SM_RUN_ENCODE(run_len);

            start += run_len;
            size -= run_len;
            actual_entries++;
        }
    }
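
    /* Write out any entries still buffered in the partial last block. */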
    if (entry != entry_map) {
        size = (entry - entry_map) * sizeof (uint64_t);
        mutex_exit(rt->rt_lock);
        dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
            size, entry_map, tx);
        mutex_enter(rt->rt_lock);
        sm->sm_phys->smp_objsize += size;
    }
    ASSERT3U(expected_entries, ==, actual_entries);

    /*
     * Ensure that the space_map's accounting wasn't changed
     * while we were in the middle of writing it out.
     */
    VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
    VERIFY3U(range_tree_space(rt), ==, rt_space);
    VERIFY3U(range_tree_space(rt), ==, total);

    zio_buf_free(entry_map, sm->sm_blksz);
}

static int
space_map_open_impl(space_map_t *sm)
{
    int error;
    u_longlong_t blocks;

    error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
    if (error)
        return (error);

    dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
    sm->sm_phys = sm->sm_dbuf->db_data;
    return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
{
    space_map_t *sm;
    int error;

    ASSERT(*smp == NULL);
    ASSERT(os != NULL);
    ASSERT(object != 0);

    sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

    sm->sm_start = start;
    sm->sm_size = size;
    sm->sm_shift = shift;
    sm->sm_lock = lp;
    sm->sm_os = os;
    sm->sm_object = object;

    error = space_map_open_impl(sm);
    if (error != 0) {
        space_map_close(sm);
        return (error);
    }

    *smp = sm;

    return (0);
}

void
space_map_close(space_map_t *sm)
{
    if (sm == NULL)
        return;

    if (sm->sm_dbuf != NULL)
        dmu_buf_rele(sm->sm_dbuf, sm);
    sm->sm_dbuf = NULL;
    sm->sm_phys = NULL;

    kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
    objset_t *os = sm->sm_os;
    spa_t *spa = dmu_objset_spa(os);
    dmu_object_info_t doi;

    ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
    ASSERT(dmu_tx_is_syncing(tx));

    dmu_object_info_from_db(sm->sm_dbuf, &doi);

    /*
     * If the space map has the wrong bonus size (because
     * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
     * the wrong block size (because space_map_blksz has changed),
     * free and re-allocate its object with the updated sizes.
     *
     * Otherwise, just truncate the current object.
     */
    if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
        doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
        doi.doi_data_block_size != space_map_blksz) {
        zfs_dbgmsg("txg %llu, spa %s, reallocating: "
            "old bonus %u, old blocksz %u", dmu_tx_get_txg(tx),
            spa_name(spa), doi.doi_bonus_size, doi.doi_data_block_size);

        space_map_free(sm, tx);
        dmu_buf_rele(sm->sm_dbuf, sm);

        sm->sm_object = space_map_alloc(sm->sm_os, tx);
        VERIFY0(space_map_open_impl(sm));
    } else {
        VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

        /*
         * If the space map is reallocated, its histogram
         * will be reset. Do the same in the common case so that
         * bugs related to the uncommon case do not go unnoticed.
         */
        bzero(sm->sm_phys->smp_histogram,
            sizeof (sm->sm_phys->smp_histogram));
    }

    dmu_buf_will_dirty(sm->sm_dbuf, tx);
    sm->sm_phys->smp_objsize = 0;
    sm->sm_phys->smp_alloc = 0;
}
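
/*
 * Typical usage sketch (illustrative; the real consumers, such as the
 * metaslab code, live outside this file):
 *
 *	space_map_t *sm = NULL;
 *	VERIFY0(space_map_open(&sm, os, object, start, size, shift, lock));
 *	mutex_enter(lock);
 *	error = space_map_load(sm, rt, SM_FREE);
 *	...
 *	space_map_update(sm);
 *	mutex_exit(lock);
 *	space_map_close(sm);
 *
 * space_map_load() and space_map_update() both require sm_lock to be
 * held on entry. space_map_update() is normally called from the sync
 * path after space_map_write(), so that space_map_allocated() and
 * space_map_length() reflect the just-synced on-disk state.
 */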

/*
 * Update the in-core space_map allocation and length values.
 */
void
space_map_update(space_map_t *sm)
{
    if (sm == NULL)
        return;

    ASSERT(MUTEX_HELD(sm->sm_lock));

    sm->sm_alloc = sm->sm_phys->smp_alloc;
    sm->sm_length = sm->sm_phys->smp_objsize;
}

uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
    spa_t *spa = dmu_objset_spa(os);
    uint64_t object;
    int bonuslen;

    if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
        spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
        bonuslen = sizeof (space_map_phys_t);
        ASSERT3U(bonuslen, <=, dmu_bonus_max());
    } else {
        bonuslen = SPACE_MAP_SIZE_V0;
    }

    object = dmu_object_alloc(os,
        DMU_OT_SPACE_MAP, space_map_blksz,
        DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

    return (object);
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
    spa_t *spa;

    if (sm == NULL)
        return;

    spa = dmu_objset_spa(sm->sm_os);
    if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
        dmu_object_info_t doi;

        dmu_object_info_from_db(sm->sm_dbuf, &doi);
        if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
            VERIFY(spa_feature_is_active(spa,
                SPA_FEATURE_SPACEMAP_HISTOGRAM));
            spa_feature_decr(spa,
                SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
        }
    }

    VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
    sm->sm_object = 0;
}

uint64_t
space_map_object(space_map_t *sm)
{
    return (sm != NULL ? sm->sm_object : 0);
}

/*
 * Returns the already synced, on-disk allocated space.
 */
uint64_t
space_map_allocated(space_map_t *sm)
{
    return (sm != NULL ? sm->sm_alloc : 0);
}

/*
 * Returns the already synced, on-disk length.
 */
uint64_t
space_map_length(space_map_t *sm)
{
    return (sm != NULL ? sm->sm_length : 0);
}

/*
 * Returns the allocated space that is currently syncing.
 */
int64_t
space_map_alloc_delta(space_map_t *sm)
{
    if (sm == NULL)
        return (0);
    ASSERT(sm->sm_dbuf != NULL);
    return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}