dbuf.c revision 260617
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
        dmu_buf_impl_t *db = vdb;
        bzero(db, sizeof (dmu_buf_impl_t));

        mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
        refcount_create(&db->db_holds);
        return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
        dmu_buf_impl_t *db = vdb;
        mutex_destroy(&db->db_mtx);
        cv_destroy(&db->db_changed);
        refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
        uintptr_t osv = (uintptr_t)os;
        uint64_t crc = -1ULL;

        ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
        crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

        crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

        return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
        ((dbuf)->db.db_object == (obj) &&		\
        (dbuf)->db_objset == (os) &&			\
        (dbuf)->db_level == (level) &&			\
        (dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        objset_t *os = dn->dn_objset;
        uint64_t obj = dn->dn_object;
        uint64_t hv = DBUF_HASH(os, obj, level, blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *db;

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
                if (DBUF_EQUAL(db, os, obj, level, blkid)) {
                        mutex_enter(&db->db_mtx);
                        if (db->db_state != DB_EVICTING) {
                                mutex_exit(DBUF_HASH_MUTEX(h, idx));
                                return (db);
                        }
                        mutex_exit(&db->db_mtx);
                }
        }
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        return (NULL);
}
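
/*
 * Usage sketch (illustrative): a successful dbuf_find() returns with
 * the dbuf's db_mtx held, so a caller that only wants to peek must
 * drop the mutex itself, roughly:
 *
 *	dmu_buf_impl_t *db = dbuf_find(dn, 0, blkid);
 *	if (db != NULL) {
 *		... inspect db->db_state ...
 *		mutex_exit(&db->db_mtx);
 *	}
 *
 * dbuf_prefetch() below follows exactly this pattern.
 */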

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        objset_t *os = db->db_objset;
        uint64_t obj = db->db.db_object;
        int level = db->db_level;
        uint64_t blkid = db->db_blkid;
        uint64_t hv = DBUF_HASH(os, obj, level, blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *dbf;

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
                if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
                        mutex_enter(&dbf->db_mtx);
                        if (dbf->db_state != DB_EVICTING) {
                                mutex_exit(DBUF_HASH_MUTEX(h, idx));
                                return (dbf);
                        }
                        mutex_exit(&dbf->db_mtx);
                }
        }

        mutex_enter(&db->db_mtx);
        db->db_hash_next = h->hash_table[idx];
        h->hash_table[idx] = db;
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_add_64(&dbuf_hash_count, 1);

        return (NULL);
}

/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
            db->db_level, db->db_blkid);
        uint64_t idx = hv & h->hash_table_mask;
        dmu_buf_impl_t *dbf, **dbp;

        /*
         * We mustn't hold db_mtx to maintain lock ordering:
         * DBUF_HASH_MUTEX > db_mtx.
         */
        ASSERT(refcount_is_zero(&db->db_holds));
        ASSERT(db->db_state == DB_EVICTING);
        ASSERT(!MUTEX_HELD(&db->db_mtx));

        mutex_enter(DBUF_HASH_MUTEX(h, idx));
        dbp = &h->hash_table[idx];
        while ((dbf = *dbp) != db) {
                dbp = &dbf->db_hash_next;
                ASSERT(dbf != NULL);
        }
        *dbp = db->db_hash_next;
        db->db_hash_next = NULL;
        mutex_exit(DBUF_HASH_MUTEX(h, idx));
        atomic_add_64(&dbuf_hash_count, -1);
}

static arc_evict_func_t dbuf_do_evict;

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));

        if (db->db_level != 0 || db->db_evict_func == NULL)
                return;

        if (db->db_user_data_ptr_ptr)
                *db->db_user_data_ptr_ptr = db->db.db_data;
        db->db_evict_func(&db->db, db->db_user_ptr);
        db->db_user_ptr = NULL;
        db->db_user_data_ptr_ptr = NULL;
        db->db_evict_func = NULL;
}

boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
        if (db->db_level > 0) {
                return (B_TRUE);
        } else {
                boolean_t is_metadata;

                DB_DNODE_ENTER(db);
                is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
                DB_DNODE_EXIT(db);

                return (is_metadata);
        }
}

void
dbuf_evict(dmu_buf_impl_t *db)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db_buf == NULL);
        ASSERT(db->db_data_pending == NULL);

        dbuf_clear(db);
        dbuf_destroy(db);
}

void
dbuf_init(void)
{
        uint64_t hsize = 1ULL << 16;
        dbuf_hash_table_t *h = &dbuf_hash_table;
        int i;

        /*
         * The hash table is big enough to fill all of physical memory
         * with an average 4K block size.  The table will take up
         * totalmem * sizeof (void *) / 4K (i.e. 2MB/GB with 8-byte
         * pointers).
         */
        while (hsize * 4096 < (uint64_t)physmem * PAGESIZE)
                hsize <<= 1;
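
        /*
         * Worked example (illustrative): with 8 GB of physical memory
         * the loop above settles on hsize = 2^33 / 4096 = 2^21 buckets,
         * so the bucket array of 8-byte pointers occupies
         * 2^21 * 8 = 16 MB, matching the 2MB/GB figure quoted above.
         */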

retry:
        h->hash_table_mask = hsize - 1;
        h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
        if (h->hash_table == NULL) {
                /* XXX - we should really return an error instead of assert */
                ASSERT(hsize > (1ULL << 10));
                hsize >>= 1;
                goto retry;
        }

        dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
            sizeof (dmu_buf_impl_t),
            0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}

void
dbuf_fini(void)
{
        dbuf_hash_table_t *h = &dbuf_hash_table;
        int i;

        for (i = 0; i < DBUF_MUTEXES; i++)
                mutex_destroy(&h->hash_mutexes[i]);
        kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
        kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
        dnode_t *dn;
        dbuf_dirty_record_t *dr;

        ASSERT(MUTEX_HELD(&db->db_mtx));

        if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
                return;

        ASSERT(db->db_objset != NULL);
        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        if (dn == NULL) {
                ASSERT(db->db_parent == NULL);
                ASSERT(db->db_blkptr == NULL);
        } else {
                ASSERT3U(db->db.db_object, ==, dn->dn_object);
                ASSERT3P(db->db_objset, ==, dn->dn_objset);
                ASSERT3U(db->db_level, <, dn->dn_nlevels);
                ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
                    db->db_blkid == DMU_SPILL_BLKID ||
                    !list_is_empty(&dn->dn_dbufs));
        }
        if (db->db_blkid == DMU_BONUS_BLKID) {
                ASSERT(dn != NULL);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
        } else if (db->db_blkid == DMU_SPILL_BLKID) {
                ASSERT(dn != NULL);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                ASSERT0(db->db.db_offset);
        } else {
                ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
        }

        for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
                ASSERT(dr->dr_dbuf == db);

        for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
                ASSERT(dr->dr_dbuf == db);

        /*
         * We can't assert that db_size matches dn_datablksz because it
         * can be momentarily different when another thread is doing
         * dnode_set_blksz().
         */
        if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
                dr = db->db_data_pending;
                /*
                 * It should only be modified in syncing context, so
                 * make sure we only have one copy of the data.
                 */
                ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
        }

        /* verify db->db_blkptr */
        if (db->db_blkptr) {
                if (db->db_parent == dn->dn_dbuf) {
                        /* db is pointed to by the dnode */
                        /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
                        if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
                                ASSERT(db->db_parent == NULL);
                        else
                                ASSERT(db->db_parent != NULL);
                        if (db->db_blkid != DMU_SPILL_BLKID)
                                ASSERT3P(db->db_blkptr, ==,
                                    &dn->dn_phys->dn_blkptr[db->db_blkid]);
                } else {
                        /* db is pointed to by an indirect block */
                        int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
                        ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
                        ASSERT3U(db->db_parent->db.db_object, ==,
                            db->db.db_object);
                        /*
                         * dnode_grow_indblksz() can make this fail if we don't
                         * have the struct_rwlock.  XXX indblksz no longer
                         * grows.  safe to do this now?
                         */
                        if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                                ASSERT3P(db->db_blkptr, ==,
                                    ((blkptr_t *)db->db_parent->db.db_data +
                                    db->db_blkid % epb));
                        }
                }
        }
        if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
            (db->db_buf == NULL || db->db_buf->b_data) &&
            db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
            db->db_state != DB_FILL && !dn->dn_free_txg) {
                /*
                 * If the blkptr isn't set but they have nonzero data,
                 * it had better be dirty, otherwise we'll lose that
                 * data when we evict this buffer.
                 */
                if (db->db_dirtycnt == 0) {
                        uint64_t *buf = db->db.db_data;
                        int i;

                        for (i = 0; i < db->db.db_size >> 3; i++) {
                                ASSERT(buf[i] == 0);
                        }
                }
        }
        DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_update_data(dmu_buf_impl_t *db)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));
        if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
                ASSERT(!refcount_is_zero(&db->db_holds));
                *db->db_user_data_ptr_ptr = db->db.db_data;
        }
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
        db->db_buf = buf;
        if (buf != NULL) {
                ASSERT(buf->b_data != NULL);
                db->db.db_data = buf->b_data;
                if (!arc_released(buf))
                        arc_set_callback(buf, dbuf_do_evict, db);
                dbuf_update_data(db);
        } else {
                dbuf_evict_user(db);
                db->db.db_data = NULL;
                if (db->db_state != DB_NOFILL)
                        db->db_state = DB_UNCACHED;
        }
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
        arc_buf_t *abuf;

        mutex_enter(&db->db_mtx);
        if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
                int blksz = db->db.db_size;
                spa_t *spa;

                mutex_exit(&db->db_mtx);
                DB_GET_SPA(&spa, db);
                abuf = arc_loan_buf(spa, blksz);
                bcopy(db->db.db_data, abuf->b_data, blksz);
        } else {
                abuf = db->db_buf;
                arc_loan_inuse_buf(abuf, db);
                dbuf_set_data(db, NULL);
                mutex_exit(&db->db_mtx);
        }
        return (abuf);
}

uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
        if (dn->dn_datablkshift) {
                return (offset >> dn->dn_datablkshift);
        } else {
                ASSERT3U(offset, <, dn->dn_datablksz);
                return (0);
        }
}
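
/*
 * Worked example (illustrative): for a dnode with 128K data blocks
 * (dn_datablkshift == 17), offset 300000 maps to block
 * 300000 >> 17 == 2.  An object small enough to fit in a single
 * (possibly odd-sized) block has dn_datablkshift == 0 and always
 * maps to block 0.
 */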

static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
        dmu_buf_impl_t *db = vdb;

        mutex_enter(&db->db_mtx);
        ASSERT3U(db->db_state, ==, DB_READ);
        /*
         * All reads are synchronous, so we must have a hold on the dbuf
         */
        ASSERT(refcount_count(&db->db_holds) > 0);
        ASSERT(db->db_buf == NULL);
        ASSERT(db->db.db_data == NULL);
        if (db->db_level == 0 && db->db_freed_in_flight) {
                /* we were freed in flight; disregard any error */
                arc_release(buf, db);
                bzero(buf->b_data, db->db.db_size);
                arc_buf_freeze(buf);
                db->db_freed_in_flight = FALSE;
                dbuf_set_data(db, buf);
                db->db_state = DB_CACHED;
        } else if (zio == NULL || zio->io_error == 0) {
                dbuf_set_data(db, buf);
                db->db_state = DB_CACHED;
        } else {
                ASSERT(db->db_blkid != DMU_BONUS_BLKID);
                ASSERT3P(db->db_buf, ==, NULL);
                VERIFY(arc_buf_remove_ref(buf, db));
                db->db_state = DB_UNCACHED;
        }
        cv_broadcast(&db->db_changed);
        dbuf_rele_and_unlock(db, NULL);
}

static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
        dnode_t *dn;
        spa_t *spa;
        zbookmark_t zb;
        uint32_t aflags = ARC_NOWAIT;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        ASSERT(!refcount_is_zero(&db->db_holds));
        /* We need the struct_rwlock to prevent db_blkptr from changing. */
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db_state == DB_UNCACHED);
        ASSERT(db->db_buf == NULL);

        if (db->db_blkid == DMU_BONUS_BLKID) {
                int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

                ASSERT3U(bonuslen, <=, db->db.db_size);
                db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
                arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
                if (bonuslen < DN_MAX_BONUSLEN)
                        bzero(db->db.db_data, DN_MAX_BONUSLEN);
                if (bonuslen)
                        bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
                DB_DNODE_EXIT(db);
                dbuf_update_data(db);
                db->db_state = DB_CACHED;
                mutex_exit(&db->db_mtx);
                return;
        }

        /*
         * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
         * processes the delete record and clears the bp while we are waiting
         * for the dn_mtx (resulting in a "no" from block_freed).
         */
        if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
            (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
            BP_IS_HOLE(db->db_blkptr)))) {
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

                dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa,
                    db->db.db_size, db, type));
                DB_DNODE_EXIT(db);
                bzero(db->db.db_data, db->db.db_size);
                db->db_state = DB_CACHED;
                *flags |= DB_RF_CACHED;
                mutex_exit(&db->db_mtx);
                return;
        }

        spa = dn->dn_objset->os_spa;
        DB_DNODE_EXIT(db);

        db->db_state = DB_READ;
        mutex_exit(&db->db_mtx);

        if (DBUF_IS_L2CACHEABLE(db))
                aflags |= ARC_L2CACHE;
        if (DBUF_IS_L2COMPRESSIBLE(db))
                aflags |= ARC_L2COMPRESS;

        SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
            db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
            db->db.db_object, db->db_level, db->db_blkid);

        dbuf_add_ref(db, NULL);

        (void) arc_read(zio, spa, db->db_blkptr,
            dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
            (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
            &aflags, &zb);
        if (aflags & ARC_CACHED)
                *flags |= DB_RF_CACHED;
}
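
/*
 * Read-path summary (informal sketch of the code above):
 * dbuf_read_impl() settles a DB_UNCACHED dbuf directly into DB_CACHED
 * for the bonus-buffer and hole cases; otherwise it moves the dbuf to
 * DB_READ and issues arc_read(), whose completion callback
 * dbuf_read_done() moves the dbuf to DB_CACHED on success or back to
 * DB_UNCACHED on error.
 */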

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
        int err = 0;
        int havepzio = (zio != NULL);
        int prefetch;
        dnode_t *dn;

        /*
         * We don't have to hold the mutex to check db_state because it
         * can't be freed while we have a hold on the buffer.
         */
        ASSERT(!refcount_is_zero(&db->db_holds));

        if (db->db_state == DB_NOFILL)
                return (SET_ERROR(EIO));

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        if ((flags & DB_RF_HAVESTRUCT) == 0)
                rw_enter(&dn->dn_struct_rwlock, RW_READER);

        prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
            (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
            DBUF_IS_CACHEABLE(db);

        mutex_enter(&db->db_mtx);
        if (db->db_state == DB_CACHED) {
                mutex_exit(&db->db_mtx);
                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
                            db->db.db_size, TRUE);
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);
        } else if (db->db_state == DB_UNCACHED) {
                spa_t *spa = dn->dn_objset->os_spa;

                if (zio == NULL)
                        zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
                dbuf_read_impl(db, zio, &flags);

                /* dbuf_read_impl has dropped db_mtx for us */

                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
                            db->db.db_size, flags & DB_RF_CACHED);

                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);

                if (!havepzio)
                        err = zio_wait(zio);
        } else {
                /*
                 * Another reader came in while the dbuf was in flight
                 * between UNCACHED and CACHED.  Either a writer will finish
                 * writing the buffer (sending the dbuf to CACHED) or the
                 * first reader's request will reach the read_done callback
                 * and send the dbuf to CACHED.  Otherwise, a failure
                 * occurred and the dbuf went to UNCACHED.
                 */
                mutex_exit(&db->db_mtx);
                if (prefetch)
                        dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
                            db->db.db_size, TRUE);
                if ((flags & DB_RF_HAVESTRUCT) == 0)
                        rw_exit(&dn->dn_struct_rwlock);
                DB_DNODE_EXIT(db);

                /* Skip the wait per the caller's request. */
                mutex_enter(&db->db_mtx);
                if ((flags & DB_RF_NEVERWAIT) == 0) {
                        while (db->db_state == DB_READ ||
                            db->db_state == DB_FILL) {
                                ASSERT(db->db_state == DB_READ ||
                                    (flags & DB_RF_HAVESTRUCT) == 0);
                                cv_wait(&db->db_changed, &db->db_mtx);
                        }
                        if (db->db_state == DB_UNCACHED)
                                err = SET_ERROR(EIO);
                }
                mutex_exit(&db->db_mtx);
        }

        ASSERT(err || havepzio || db->db_state == DB_CACHED);
        return (err);
}

static void
dbuf_noread(dmu_buf_impl_t *db)
{
        ASSERT(!refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        mutex_enter(&db->db_mtx);
        while (db->db_state == DB_READ || db->db_state == DB_FILL)
                cv_wait(&db->db_changed, &db->db_mtx);
        if (db->db_state == DB_UNCACHED) {
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
                spa_t *spa;

                ASSERT(db->db_buf == NULL);
                ASSERT(db->db.db_data == NULL);
                DB_GET_SPA(&spa, db);
                dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
                db->db_state = DB_FILL;
        } else if (db->db_state == DB_NOFILL) {
                dbuf_set_data(db, NULL);
        } else {
                ASSERT3U(db->db_state, ==, DB_CACHED);
        }
        mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of
 * buffers that have been modified in a previous transaction
 * group, before we modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer, we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
        dbuf_dirty_record_t *dr = db->db_last_dirty;

        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(db->db.db_data != NULL);
        ASSERT(db->db_level == 0);
        ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

        if (dr == NULL ||
            (dr->dt.dl.dr_data !=
            ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
                return;

        /*
         * If the last dirty record for this dbuf has not yet synced
         * and it's referencing the dbuf data, either:
         *	reset the reference to point to a new copy,
         * or (if there are no active holders)
         *	just null out the current db_data pointer.
         */
        ASSERT(dr->dr_txg >= txg - 2);
        if (db->db_blkid == DMU_BONUS_BLKID) {
                /* Note that the data bufs here are zio_bufs */
                dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
                arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
                bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
        } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
                int size = db->db.db_size;
                arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
                spa_t *spa;

                DB_GET_SPA(&spa, db);
                dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
                bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
        } else {
                dbuf_set_data(db, NULL);
        }
}
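
/*
 * Worked example for the copy decision above (illustrative): every
 * dirty txg holds one reference on the dbuf (see dbuf_dirty()), so
 * refcount > db_dirtycnt means some additional holder may still be
 * looking at db_buf; in that case the current contents are copied into
 * a fresh buffer for the older dirty record.  If only dirty records
 * hold the dbuf, the buffer is handed over wholesale by nulling the
 * data pointer via dbuf_set_data(db, NULL).
 */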
720 * 721 * Note that when we are called from dbuf_free_range() we do 722 * not put a hold on the buffer, we just traverse the active 723 * dbuf list for the dnode. 724 */ 725static void 726dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg) 727{ 728 dbuf_dirty_record_t *dr = db->db_last_dirty; 729 730 ASSERT(MUTEX_HELD(&db->db_mtx)); 731 ASSERT(db->db.db_data != NULL); 732 ASSERT(db->db_level == 0); 733 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT); 734 735 if (dr == NULL || 736 (dr->dt.dl.dr_data != 737 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf))) 738 return; 739 740 /* 741 * If the last dirty record for this dbuf has not yet synced 742 * and its referencing the dbuf data, either: 743 * reset the reference to point to a new copy, 744 * or (if there a no active holders) 745 * just null out the current db_data pointer. 746 */ 747 ASSERT(dr->dr_txg >= txg - 2); 748 if (db->db_blkid == DMU_BONUS_BLKID) { 749 /* Note that the data bufs here are zio_bufs */ 750 dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN); 751 arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER); 752 bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN); 753 } else if (refcount_count(&db->db_holds) > db->db_dirtycnt) { 754 int size = db->db.db_size; 755 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); 756 spa_t *spa; 757 758 DB_GET_SPA(&spa, db); 759 dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type); 760 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size); 761 } else { 762 dbuf_set_data(db, NULL); 763 } 764} 765 766void 767dbuf_unoverride(dbuf_dirty_record_t *dr) 768{ 769 dmu_buf_impl_t *db = dr->dr_dbuf; 770 blkptr_t *bp = &dr->dt.dl.dr_overridden_by; 771 uint64_t txg = dr->dr_txg; 772 773 ASSERT(MUTEX_HELD(&db->db_mtx)); 774 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC); 775 ASSERT(db->db_level == 0); 776 777 if (db->db_blkid == DMU_BONUS_BLKID || 778 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN) 779 return; 780 781 ASSERT(db->db_data_pending != dr); 782 783 /* free this block */ 784 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite) { 785 spa_t *spa; 786 787 DB_GET_SPA(&spa, db); 788 zio_free(spa, txg, bp); 789 } 790 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 791 dr->dt.dl.dr_nopwrite = B_FALSE; 792 793 /* 794 * Release the already-written buffer, so we leave it in 795 * a consistent dirty state. Note that all callers are 796 * modifying the buffer, so they will immediately do 797 * another (redundant) arc_release(). Therefore, leave 798 * the buf thawed to save the effort of freezing & 799 * immediately re-thawing it. 800 */ 801 arc_release(dr->dt.dl.dr_data, db); 802} 803 804/* 805 * Evict (if its unreferenced) or clear (if its referenced) any level-0 806 * data blocks in the free range, so that any future readers will find 807 * empty blocks. Also, if we happen across any level-1 dbufs in the 808 * range that have not already been marked dirty, mark them dirty so 809 * they stay in memory. 810 * 811 * This is a no-op if the dataset is in the middle of an incremental 812 * receive; see comment below for details. 

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.  Also, if we happen across any level-1 dbufs in the
 * range that have not already been marked dirty, mark them dirty so
 * they stay in memory.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db, *db_next;
        uint64_t txg = tx->tx_txg;
        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
        uint64_t first_l1 = start >> epbs;
        uint64_t last_l1 = end >> epbs;

        if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID)) {
                end = dn->dn_maxblkid;
                last_l1 = end >> epbs;
        }
        dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);

        mutex_enter(&dn->dn_dbufs_mtx);
        if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
                /* There can't be any dbufs in this range; no need to search. */
                mutex_exit(&dn->dn_dbufs_mtx);
                return;
        } else if (dmu_objset_is_receiving(dn->dn_objset)) {
                /*
                 * If we are receiving, we expect there to be no dbufs in
                 * the range to be freed, because receive modifies each
                 * block at most once, and in offset order.  If this is
                 * not the case, it can lead to performance problems,
                 * so note that we unexpectedly took the slow path.
                 */
                atomic_inc_64(&zfs_free_range_recv_miss);
        }

        for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
                db_next = list_next(&dn->dn_dbufs, db);
                ASSERT(db->db_blkid != DMU_BONUS_BLKID);

                if (db->db_level == 1 &&
                    db->db_blkid >= first_l1 && db->db_blkid <= last_l1) {
                        mutex_enter(&db->db_mtx);
                        if (db->db_last_dirty &&
                            db->db_last_dirty->dr_txg < txg) {
                                dbuf_add_ref(db, FTAG);
                                mutex_exit(&db->db_mtx);
                                dbuf_will_dirty(db, tx);
                                dbuf_rele(db, FTAG);
                        } else {
                                mutex_exit(&db->db_mtx);
                        }
                }

                if (db->db_level != 0)
                        continue;
                dprintf_dbuf(db, "found buf %s\n", "");
                if (db->db_blkid < start || db->db_blkid > end)
                        continue;

                /* found a level 0 buffer in the range */
                mutex_enter(&db->db_mtx);
                if (dbuf_undirty(db, tx)) {
                        /* mutex has been dropped and dbuf destroyed */
                        continue;
                }

                if (db->db_state == DB_UNCACHED ||
                    db->db_state == DB_NOFILL ||
                    db->db_state == DB_EVICTING) {
                        ASSERT(db->db.db_data == NULL);
                        mutex_exit(&db->db_mtx);
                        continue;
                }
                if (db->db_state == DB_READ || db->db_state == DB_FILL) {
                        /* will be handled in dbuf_read_done or dbuf_rele */
                        db->db_freed_in_flight = TRUE;
                        mutex_exit(&db->db_mtx);
                        continue;
                }
                if (refcount_count(&db->db_holds) == 0) {
                        ASSERT(db->db_buf);
                        dbuf_clear(db);
                        continue;
                }
                /* The dbuf is referenced */

                if (db->db_last_dirty != NULL) {
                        dbuf_dirty_record_t *dr = db->db_last_dirty;

                        if (dr->dr_txg == txg) {
                                /*
                                 * This buffer is "in-use", re-adjust the file
                                 * size to reflect that this buffer may
                                 * contain new data when we sync.
                                 */
                                if (db->db_blkid != DMU_SPILL_BLKID &&
                                    db->db_blkid > dn->dn_maxblkid)
                                        dn->dn_maxblkid = db->db_blkid;
                                dbuf_unoverride(dr);
                        } else {
                                /*
                                 * This dbuf is not dirty in the open context.
                                 * Either uncache it (if it's not referenced in
                                 * the open context) or reset its contents to
                                 * empty.
                                 */
                                dbuf_fix_old_data(db, txg);
                        }
                }
                /* clear the contents if it's cached */
                if (db->db_state == DB_CACHED) {
                        ASSERT(db->db.db_data != NULL);
                        arc_release(db->db_buf, db);
                        bzero(db->db.db_data, db->db.db_size);
                        arc_buf_freeze(db->db_buf);
                }

                mutex_exit(&db->db_mtx);
        }
        mutex_exit(&dn->dn_dbufs_mtx);
}
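
/*
 * Worked example for the level-1 range above (illustrative): with 16K
 * indirect blocks (dn_indblkshift == 14) and SPA_BLKPTRSHIFT == 7,
 * epbs == 7, so each level-1 dbuf covers 128 level-0 blocks and
 * freeing level-0 blocks 0-1000 touches level-1 blkids 0 through
 * 1000 >> 7 == 7.
 */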

static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
        dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
        uint64_t birth_txg = 0;

        /*
         * We don't need any locking to protect db_blkptr:
         * If it's syncing, then db_last_dirty will be set
         * so we'll ignore db_blkptr.
         */
        ASSERT(MUTEX_HELD(&db->db_mtx));
        if (db->db_last_dirty)
                birth_txg = db->db_last_dirty->dr_txg;
        else if (db->db_blkptr)
                birth_txg = db->db_blkptr->blk_birth;

        /*
         * If we don't exist or are in a snapshot, we can't be freed.
         * Don't pass the bp to dsl_dataset_block_freeable() since we
         * are holding the db_mtx lock and might deadlock if we are
         * prefetching a dedup-ed block.
         */
        if (birth_txg)
                return (ds == NULL ||
                    dsl_dataset_block_freeable(ds, NULL, birth_txg));
        else
                return (FALSE);
}

void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
        arc_buf_t *buf, *obuf;
        int osize = db->db.db_size;
        arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
        dnode_t *dn;

        ASSERT(db->db_blkid != DMU_BONUS_BLKID);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        /* XXX does *this* func really need the lock? */
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

        /*
         * This call to dbuf_will_dirty() with the dn_struct_rwlock held
         * is OK, because there can be no other references to the db
         * when we are changing its size, so no concurrent DB_FILL can
         * be happening.
         */
        /*
         * XXX we should be doing a dbuf_read, checking the return
         * value and returning that up to our callers
         */
        dbuf_will_dirty(db, tx);

        /* create the data buffer for the new block */
        buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

        /* copy old block data to the new block */
        obuf = db->db_buf;
        bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
        /* zero the remainder */
        if (size > osize)
                bzero((uint8_t *)buf->b_data + osize, size - osize);

        mutex_enter(&db->db_mtx);
        dbuf_set_data(db, buf);
        VERIFY(arc_buf_remove_ref(obuf, db));
        db->db.db_size = size;

        if (db->db_level == 0) {
                ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
                db->db_last_dirty->dt.dl.dr_data = buf;
        }
        mutex_exit(&db->db_mtx);

        dnode_willuse_space(dn, size-osize, tx);
        DB_DNODE_EXIT(db);
}
983 */ 984 /* 985 * XXX we should be doing a dbuf_read, checking the return 986 * value and returning that up to our callers 987 */ 988 dbuf_will_dirty(db, tx); 989 990 /* create the data buffer for the new block */ 991 buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type); 992 993 /* copy old block data to the new block */ 994 obuf = db->db_buf; 995 bcopy(obuf->b_data, buf->b_data, MIN(osize, size)); 996 /* zero the remainder */ 997 if (size > osize) 998 bzero((uint8_t *)buf->b_data + osize, size - osize); 999 1000 mutex_enter(&db->db_mtx); 1001 dbuf_set_data(db, buf); 1002 VERIFY(arc_buf_remove_ref(obuf, db)); 1003 db->db.db_size = size; 1004 1005 if (db->db_level == 0) { 1006 ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg); 1007 db->db_last_dirty->dt.dl.dr_data = buf; 1008 } 1009 mutex_exit(&db->db_mtx); 1010 1011 dnode_willuse_space(dn, size-osize, tx); 1012 DB_DNODE_EXIT(db); 1013} 1014 1015void 1016dbuf_release_bp(dmu_buf_impl_t *db) 1017{ 1018 objset_t *os; 1019 1020 DB_GET_OBJSET(&os, db); 1021 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os))); 1022 ASSERT(arc_released(os->os_phys_buf) || 1023 list_link_active(&os->os_dsl_dataset->ds_synced_link)); 1024 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf)); 1025 1026 (void) arc_release(db->db_buf, db); 1027} 1028 1029dbuf_dirty_record_t * 1030dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) 1031{ 1032 dnode_t *dn; 1033 objset_t *os; 1034 dbuf_dirty_record_t **drp, *dr; 1035 int drop_struct_lock = FALSE; 1036 boolean_t do_free_accounting = B_FALSE; 1037 int txgoff = tx->tx_txg & TXG_MASK; 1038 1039 ASSERT(tx->tx_txg != 0); 1040 ASSERT(!refcount_is_zero(&db->db_holds)); 1041 DMU_TX_DIRTY_BUF(tx, db); 1042 1043 DB_DNODE_ENTER(db); 1044 dn = DB_DNODE(db); 1045 /* 1046 * Shouldn't dirty a regular buffer in syncing context. Private 1047 * objects may be dirtied in syncing context, but only if they 1048 * were already pre-dirtied in open context. 1049 */ 1050 ASSERT(!dmu_tx_is_syncing(tx) || 1051 BP_IS_HOLE(dn->dn_objset->os_rootbp) || 1052 DMU_OBJECT_IS_SPECIAL(dn->dn_object) || 1053 dn->dn_objset->os_dsl_dataset == NULL); 1054 /* 1055 * We make this assert for private objects as well, but after we 1056 * check if we're already dirty. They are allowed to re-dirty 1057 * in syncing context. 1058 */ 1059 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT || 1060 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx == 1061 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN)); 1062 1063 mutex_enter(&db->db_mtx); 1064 /* 1065 * XXX make this true for indirects too? The problem is that 1066 * transactions created with dmu_tx_create_assigned() from 1067 * syncing context don't bother holding ahead. 1068 */ 1069 ASSERT(db->db_level != 0 || 1070 db->db_state == DB_CACHED || db->db_state == DB_FILL || 1071 db->db_state == DB_NOFILL); 1072 1073 mutex_enter(&dn->dn_mtx); 1074 /* 1075 * Don't set dirtyctx to SYNC if we're just modifying this as we 1076 * initialize the objset. 1077 */ 1078 if (dn->dn_dirtyctx == DN_UNDIRTIED && 1079 !BP_IS_HOLE(dn->dn_objset->os_rootbp)) { 1080 dn->dn_dirtyctx = 1081 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN); 1082 ASSERT(dn->dn_dirtyctx_firstset == NULL); 1083 dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP); 1084 } 1085 mutex_exit(&dn->dn_mtx); 1086 1087 if (db->db_blkid == DMU_SPILL_BLKID) 1088 dn->dn_have_spill = B_TRUE; 1089 1090 /* 1091 * If this buffer is already dirty, we're done. 
        drp = &db->db_last_dirty;
        ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
            db->db.db_object == DMU_META_DNODE_OBJECT);
        while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
                drp = &dr->dr_next;
        if (dr && dr->dr_txg == tx->tx_txg) {
                DB_DNODE_EXIT(db);

                if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
                        /*
                         * If this buffer has already been written out,
                         * we now need to reset its state.
                         */
                        dbuf_unoverride(dr);
                        if (db->db.db_object != DMU_META_DNODE_OBJECT &&
                            db->db_state != DB_NOFILL)
                                arc_buf_thaw(db->db_buf);
                }
                mutex_exit(&db->db_mtx);
                return (dr);
        }

        /*
         * Only valid if not already dirty.
         */
        ASSERT(dn->dn_object == 0 ||
            dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
            (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

        ASSERT3U(dn->dn_nlevels, >, db->db_level);
        ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
            dn->dn_phys->dn_nlevels > db->db_level ||
            dn->dn_next_nlevels[txgoff] > db->db_level ||
            dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
            dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

        /*
         * We should only be dirtying in syncing context if it's the
         * mos or we're initializing the os or it's a special object.
         * However, we are allowed to dirty in syncing context provided
         * we already dirtied it in open context.  Hence we must make
         * this assertion only if we're not already dirty.
         */
        os = dn->dn_objset;
        ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
            os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
        ASSERT(db->db.db_size != 0);

        dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

        if (db->db_blkid != DMU_BONUS_BLKID) {
                /*
                 * Update the accounting.
                 * Note: we delay "free accounting" until after we drop
                 * the db_mtx.  This keeps us from grabbing other locks
                 * (and possibly deadlocking) in bp_get_dsize() while
                 * also holding the db_mtx.
                 */
                dnode_willuse_space(dn, db->db.db_size, tx);
                do_free_accounting = dbuf_block_freeable(db);
        }

        /*
         * If this buffer is dirty in an old transaction group we need
         * to make a copy of it so that the changes we make in this
         * transaction group won't leak out when we sync the older txg.
         */
        dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
        if (db->db_level == 0) {
                void *data_old = db->db_buf;

                if (db->db_state != DB_NOFILL) {
                        if (db->db_blkid == DMU_BONUS_BLKID) {
                                dbuf_fix_old_data(db, tx->tx_txg);
                                data_old = db->db.db_data;
                        } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
                                /*
                                 * Release the data buffer from the cache so
                                 * that we can modify it without impacting
                                 * possible other users of this cached data
                                 * block.  Note that indirect blocks and
                                 * private objects are not released until the
                                 * syncing state (since they are only modified
                                 * then).
                                 */
                                arc_release(db->db_buf, db);
                                dbuf_fix_old_data(db, tx->tx_txg);
                                data_old = db->db_buf;
                        }
                        ASSERT(data_old != NULL);
                }
                dr->dt.dl.dr_data = data_old;
        } else {
                mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
                list_create(&dr->dt.di.dr_children,
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
        }
        dr->dr_dbuf = db;
        dr->dr_txg = tx->tx_txg;
        dr->dr_next = *drp;
        *drp = dr;

        /*
         * We could have been freed_in_flight between the dbuf_noread
         * and dbuf_dirty.  We win, as though the dbuf_noread() had
         * happened after the free.
         */
        if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
            db->db_blkid != DMU_SPILL_BLKID) {
                mutex_enter(&dn->dn_mtx);
                dnode_clear_range(dn, db->db_blkid, 1, tx);
                mutex_exit(&dn->dn_mtx);
                db->db_freed_in_flight = FALSE;
        }

        /*
         * This buffer is now part of this txg
         */
        dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
        db->db_dirtycnt += 1;
        ASSERT3U(db->db_dirtycnt, <=, 3);

        mutex_exit(&db->db_mtx);

        if (db->db_blkid == DMU_BONUS_BLKID ||
            db->db_blkid == DMU_SPILL_BLKID) {
                mutex_enter(&dn->dn_mtx);
                ASSERT(!list_link_active(&dr->dr_dirty_node));
                list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
                mutex_exit(&dn->dn_mtx);
                dnode_setdirty(dn, tx);
                DB_DNODE_EXIT(db);
                return (dr);
        } else if (do_free_accounting) {
                blkptr_t *bp = db->db_blkptr;
                int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
                    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
                /*
                 * This is only a guess -- if the dbuf is dirty
                 * in a previous txg, we don't know how much
                 * space it will use on disk yet.  We should
                 * really have the struct_rwlock to access
                 * db_blkptr, but since this is just a guess,
                 * it's OK if we get an odd answer.
                 */
                ddt_prefetch(os->os_spa, bp);
                dnode_willuse_space(dn, -willfree, tx);
        }

        if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                drop_struct_lock = TRUE;
        }

        if (db->db_level == 0) {
                dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
                ASSERT(dn->dn_maxblkid >= db->db_blkid);
        }

        if (db->db_level+1 < dn->dn_nlevels) {
                dmu_buf_impl_t *parent = db->db_parent;
                dbuf_dirty_record_t *di;
                int parent_held = FALSE;

                if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
                        int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

                        parent = dbuf_hold_level(dn, db->db_level+1,
                            db->db_blkid >> epbs, FTAG);
                        ASSERT(parent != NULL);
                        parent_held = TRUE;
                }
                if (drop_struct_lock)
                        rw_exit(&dn->dn_struct_rwlock);
                ASSERT3U(db->db_level+1, ==, parent->db_level);
                di = dbuf_dirty(parent, tx);
                if (parent_held)
                        dbuf_rele(parent, FTAG);

                mutex_enter(&db->db_mtx);
                /* possible race with dbuf_undirty() */
                if (db->db_last_dirty == dr ||
                    dn->dn_object == DMU_META_DNODE_OBJECT) {
                        mutex_enter(&di->dt.di.dr_mtx);
                        ASSERT3U(di->dr_txg, ==, tx->tx_txg);
                        ASSERT(!list_link_active(&dr->dr_dirty_node));
                        list_insert_tail(&di->dt.di.dr_children, dr);
                        mutex_exit(&di->dt.di.dr_mtx);
                        dr->dr_parent = di;
                }
                mutex_exit(&db->db_mtx);
        } else {
                ASSERT(db->db_level+1 == dn->dn_nlevels);
                ASSERT(db->db_blkid < dn->dn_nblkptr);
                ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
                mutex_enter(&dn->dn_mtx);
                ASSERT(!list_link_active(&dr->dr_dirty_node));
                list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
                mutex_exit(&dn->dn_mtx);
                if (drop_struct_lock)
                        rw_exit(&dn->dn_struct_rwlock);
        }

        dnode_setdirty(dn, tx);
        DB_DNODE_EXIT(db);
        return (dr);
}
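
/*
 * Worked example for the recursion above (illustrative): dirtying the
 * level-0 dbuf for blkid 1000 with epbs == 7 dirties its level-1
 * parent at blkid 1000 >> 7 == 7 and links the level-0 record onto
 * that parent record's dr_children list; the recursion bottoms out
 * when db_level+1 == dn_nlevels, where the record goes on
 * dn_dirty_records[] instead.
 */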

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
        dnode_t *dn;
        uint64_t txg = tx->tx_txg;
        dbuf_dirty_record_t *dr, **drp;

        ASSERT(txg != 0);
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT0(db->db_level);
        ASSERT(MUTEX_HELD(&db->db_mtx));

        /*
         * If this buffer is not dirty, we're done.
         */
        for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
                if (dr->dr_txg <= txg)
                        break;
        if (dr == NULL || dr->dr_txg < txg)
                return (B_FALSE);
        ASSERT(dr->dr_txg == txg);
        ASSERT(dr->dr_dbuf == db);

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);

        dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

        ASSERT(db->db.db_size != 0);

        /* XXX would be nice to fix up dn_towrite_space[] */

        *drp = dr->dr_next;

        /*
         * Note that there are three places in dbuf_dirty()
         * where this dirty record may be put on a list.
         * Make sure to do a list_remove corresponding to
         * every one of those list_insert calls.
         */
        if (dr->dr_parent) {
                mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
                list_remove(&dr->dr_parent->dt.di.dr_children, dr);
                mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
        } else if (db->db_blkid == DMU_SPILL_BLKID ||
            db->db_level+1 == dn->dn_nlevels) {
                ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
                mutex_enter(&dn->dn_mtx);
                list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
                mutex_exit(&dn->dn_mtx);
        }
        DB_DNODE_EXIT(db);

        if (db->db_state != DB_NOFILL) {
                dbuf_unoverride(dr);

                ASSERT(db->db_buf != NULL);
                ASSERT(dr->dt.dl.dr_data != NULL);
                if (dr->dt.dl.dr_data != db->db_buf)
                        VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
        }
        kmem_free(dr, sizeof (dbuf_dirty_record_t));

        ASSERT(db->db_dirtycnt > 0);
        db->db_dirtycnt -= 1;

        if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
                arc_buf_t *buf = db->db_buf;

                ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
                dbuf_set_data(db, NULL);
                VERIFY(arc_buf_remove_ref(buf, db));
                dbuf_evict(db);
                return (B_TRUE);
        }

        return (B_FALSE);
}

#pragma weak dmu_buf_will_dirty = dbuf_will_dirty
void
dbuf_will_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
        int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

        ASSERT(tx->tx_txg != 0);
        ASSERT(!refcount_is_zero(&db->db_holds));

        DB_DNODE_ENTER(db);
        if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
                rf |= DB_RF_HAVESTRUCT;
        DB_DNODE_EXIT(db);
        (void) dbuf_read(db, NULL, rf);
        (void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

        db->db_state = DB_NOFILL;

        dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
        dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT(tx->tx_txg != 0);
        ASSERT(db->db_level == 0);
        ASSERT(!refcount_is_zero(&db->db_holds));

        ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
            dmu_tx_private_ok(tx));

        dbuf_noread(db);
        (void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
        mutex_enter(&db->db_mtx);
        DBUF_VERIFY(db);

        if (db->db_state == DB_FILL) {
                if (db->db_level == 0 && db->db_freed_in_flight) {
                        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
                        /* we were freed while filling */
                        /* XXX dbuf_undirty? */
                        bzero(db->db.db_data, db->db.db_size);
                        db->db_freed_in_flight = FALSE;
                }
                db->db_state = DB_CACHED;
                cv_broadcast(&db->db_changed);
        }
        mutex_exit(&db->db_mtx);
}
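
/*
 * Typical fill sequence (informal sketch; roughly how dmu_write()
 * drives these hooks for a full-block write):
 *
 *	dmu_buf_will_fill(&db->db, tx);		DB_FILL, no read needed
 *	bcopy(src, db->db.db_data, db->db.db_size);
 *	dmu_buf_fill_done(&db->db, tx);		DB_FILL -> DB_CACHED
 */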

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller.  Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
        ASSERT(!refcount_is_zero(&db->db_holds));
        ASSERT(db->db_blkid != DMU_BONUS_BLKID);
        ASSERT(db->db_level == 0);
        ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
        ASSERT(buf != NULL);
        ASSERT(arc_buf_size(buf) == db->db.db_size);
        ASSERT(tx->tx_txg != 0);

        arc_return_buf(buf, db);
        ASSERT(arc_released(buf));

        mutex_enter(&db->db_mtx);

        while (db->db_state == DB_READ || db->db_state == DB_FILL)
                cv_wait(&db->db_changed, &db->db_mtx);

        ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

        if (db->db_state == DB_CACHED &&
            refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
                mutex_exit(&db->db_mtx);
                (void) dbuf_dirty(db, tx);
                bcopy(buf->b_data, db->db.db_data, db->db.db_size);
                VERIFY(arc_buf_remove_ref(buf, db));
                xuio_stat_wbuf_copied();
                return;
        }

        xuio_stat_wbuf_nocopy();
        if (db->db_state == DB_CACHED) {
                dbuf_dirty_record_t *dr = db->db_last_dirty;

                ASSERT(db->db_buf != NULL);
                if (dr != NULL && dr->dr_txg == tx->tx_txg) {
                        ASSERT(dr->dt.dl.dr_data == db->db_buf);
                        if (!arc_released(db->db_buf)) {
                                ASSERT(dr->dt.dl.dr_override_state ==
                                    DR_OVERRIDDEN);
                                arc_release(db->db_buf, db);
                        }
                        dr->dt.dl.dr_data = buf;
                        VERIFY(arc_buf_remove_ref(db->db_buf, db));
                } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
                        arc_release(db->db_buf, db);
                        VERIFY(arc_buf_remove_ref(db->db_buf, db));
                }
                db->db_buf = NULL;
        }
        ASSERT(db->db_buf == NULL);
        dbuf_set_data(db, buf);
        db->db_state = DB_FILL;
        mutex_exit(&db->db_mtx);
        (void) dbuf_dirty(db, tx);
        dbuf_fill_done(db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_buf_evict()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
        dnode_t *dn;
        dmu_buf_impl_t *parent = db->db_parent;
        dmu_buf_impl_t *dndb;
        int dbuf_gone = FALSE;

        ASSERT(MUTEX_HELD(&db->db_mtx));
        ASSERT(refcount_is_zero(&db->db_holds));

        dbuf_evict_user(db);

        if (db->db_state == DB_CACHED) {
                ASSERT(db->db.db_data != NULL);
                if (db->db_blkid == DMU_BONUS_BLKID) {
                        zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
                        arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
                }
                db->db.db_data = NULL;
                db->db_state = DB_UNCACHED;
        }

        ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
        ASSERT(db->db_data_pending == NULL);

        db->db_state = DB_EVICTING;
        db->db_blkptr = NULL;

        DB_DNODE_ENTER(db);
        dn = DB_DNODE(db);
        dndb = dn->dn_dbuf;
        if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
                list_remove(&dn->dn_dbufs, db);
                (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
                membar_producer();
                DB_DNODE_EXIT(db);
                /*
                 * Decrementing the dbuf count means that the hold corresponding
                 * to the removed dbuf is no longer discounted in dnode_move(),
                 * so the dnode cannot be moved until after we release the hold.
                 * The membar_producer() ensures visibility of the decremented
                 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
                 * release any lock.
                 */
                dnode_rele(dn, db);
                db->db_dnode_handle = NULL;
        } else {
                DB_DNODE_EXIT(db);
        }

        if (db->db_buf)
                dbuf_gone = arc_buf_evict(db->db_buf);

        if (!dbuf_gone)
                mutex_exit(&db->db_mtx);

        /*
         * If this dbuf is referenced from an indirect dbuf,
         * decrement the ref count on the indirect dbuf.
         */
        if (parent && parent != dndb)
                dbuf_rele(parent, db);
}

static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
        int nlevels, epbs;

        *parentp = NULL;
        *bpp = NULL;

        ASSERT(blkid != DMU_BONUS_BLKID);

        if (blkid == DMU_SPILL_BLKID) {
                mutex_enter(&dn->dn_mtx);
                if (dn->dn_have_spill &&
                    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
                        *bpp = &dn->dn_phys->dn_spill;
                else
                        *bpp = NULL;
                dbuf_add_ref(dn->dn_dbuf, NULL);
                *parentp = dn->dn_dbuf;
                mutex_exit(&dn->dn_mtx);
                return (0);
        }

        if (dn->dn_phys->dn_nlevels == 0)
                nlevels = 1;
        else
                nlevels = dn->dn_phys->dn_nlevels;

        epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

        ASSERT3U(level * epbs, <, 64);
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        if (level >= nlevels ||
            (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
                /* the buffer has no parent yet */
                return (SET_ERROR(ENOENT));
        } else if (level < nlevels-1) {
                /* this block is referenced from an indirect block */
                int err = dbuf_hold_impl(dn, level+1,
                    blkid >> epbs, fail_sparse, NULL, parentp);
                if (err)
                        return (err);
                err = dbuf_read(*parentp, NULL,
                    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
                if (err) {
                        dbuf_rele(*parentp, NULL);
                        *parentp = NULL;
                        return (err);
                }
                *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
                    (blkid & ((1ULL << epbs) - 1));
                return (0);
        } else {
                /* the block is referenced from the dnode */
                ASSERT3U(level, ==, nlevels-1);
                ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
                    blkid < dn->dn_phys->dn_nblkptr);
                if (dn->dn_dbuf) {
                        dbuf_add_ref(dn->dn_dbuf, NULL);
                        *parentp = dn->dn_dbuf;
                }
                *bpp = &dn->dn_phys->dn_blkptr[blkid];
                return (0);
        }
}
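
/*
 * Index arithmetic above, worked through (illustrative): with
 * epbs == 7 (128 block pointers per 16K indirect block), level-0
 * blkid 1000 is reached through its level-1 parent at blkid
 * 1000 >> 7 == 7, at blkptr index 1000 & 127 == 104 within that
 * parent's db_data.
 */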

static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
        objset_t *os = dn->dn_objset;
        dmu_buf_impl_t *db, *odb;

        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
        ASSERT(dn->dn_type != DMU_OT_NONE);

        db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

        db->db_objset = os;
        db->db.db_object = dn->dn_object;
        db->db_level = level;
        db->db_blkid = blkid;
        db->db_last_dirty = NULL;
        db->db_dirtycnt = 0;
        db->db_dnode_handle = dn->dn_handle;
        db->db_parent = parent;
        db->db_blkptr = blkptr;

        db->db_user_ptr = NULL;
        db->db_user_data_ptr_ptr = NULL;
        db->db_evict_func = NULL;
        db->db_immediate_evict = 0;
        db->db_freed_in_flight = 0;

        if (blkid == DMU_BONUS_BLKID) {
                ASSERT3P(parent, ==, dn->dn_dbuf);
                db->db.db_size = DN_MAX_BONUSLEN -
                    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
                ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
                db->db.db_offset = DMU_BONUS_BLKID;
                db->db_state = DB_UNCACHED;
                /* the bonus dbuf is not placed in the hash table */
                arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
                return (db);
        } else if (blkid == DMU_SPILL_BLKID) {
                db->db.db_size = (blkptr != NULL) ?
                    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
                db->db.db_offset = 0;
        } else {
                int blocksize =
                    db->db_level ? 1<<dn->dn_indblkshift : dn->dn_datablksz;
                db->db.db_size = blocksize;
                db->db.db_offset = db->db_blkid * blocksize;
        }

        /*
         * Hold the dn_dbufs_mtx while we get the new dbuf
         * in the hash table *and* added to the dbufs list.
         * This prevents a possible deadlock with someone
         * trying to look up this dbuf before it's added to the
         * dn_dbufs list.
         */
        mutex_enter(&dn->dn_dbufs_mtx);
        db->db_state = DB_EVICTING;
        if ((odb = dbuf_hash_insert(db)) != NULL) {
                /* someone else inserted it first */
                kmem_cache_free(dbuf_cache, db);
                mutex_exit(&dn->dn_dbufs_mtx);
                return (odb);
        }
        list_insert_head(&dn->dn_dbufs, db);
        if (db->db_level == 0 && db->db_blkid >=
            dn->dn_unlisted_l0_blkid)
                dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
        db->db_state = DB_UNCACHED;
        mutex_exit(&dn->dn_dbufs_mtx);
        arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

        if (parent && parent != dn->dn_dbuf)
                dbuf_add_ref(parent, db);

        ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
            refcount_count(&dn->dn_holds) > 0);
        (void) refcount_add(&dn->dn_holds, db);
        (void) atomic_inc_32_nv(&dn->dn_dbufs_count);

        dprintf_dbuf(db, "db=%p\n", db);

        return (db);
}
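
/*
 * Bonus sizing above, worked through (illustrative, assuming the
 * traditional 512-byte dnode, where DN_MAX_BONUSLEN is 320): a dnode
 * using all three block pointers gets a bonus dbuf of
 * 320 - 2 * sizeof (blkptr_t) = 320 - 256 = 64 bytes, while a dnode
 * with a single block pointer keeps the full 320.
 */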

static int
dbuf_do_evict(void *private)
{
        arc_buf_t *buf = private;
        dmu_buf_impl_t *db = buf->b_private;

        if (!MUTEX_HELD(&db->db_mtx))
                mutex_enter(&db->db_mtx);

        ASSERT(refcount_is_zero(&db->db_holds));

        if (db->db_state != DB_EVICTING) {
                ASSERT(db->db_state == DB_CACHED);
                DBUF_VERIFY(db);
                db->db_buf = NULL;
                dbuf_evict(db);
        } else {
                mutex_exit(&db->db_mtx);
                dbuf_destroy(db);
        }
        return (0);
}

static void
dbuf_destroy(dmu_buf_impl_t *db)
{
        ASSERT(refcount_is_zero(&db->db_holds));

        if (db->db_blkid != DMU_BONUS_BLKID) {
                /*
                 * If this dbuf is still on the dn_dbufs list,
                 * remove it from that list.
                 */
                if (db->db_dnode_handle != NULL) {
                        dnode_t *dn;

                        DB_DNODE_ENTER(db);
                        dn = DB_DNODE(db);
                        mutex_enter(&dn->dn_dbufs_mtx);
                        list_remove(&dn->dn_dbufs, db);
                        (void) atomic_dec_32_nv(&dn->dn_dbufs_count);
                        mutex_exit(&dn->dn_dbufs_mtx);
                        DB_DNODE_EXIT(db);
                        /*
                         * Decrementing the dbuf count means that the hold
                         * corresponding to the removed dbuf is no longer
                         * discounted in dnode_move(), so the dnode cannot be
                         * moved until after we release the hold.
                         */
                        dnode_rele(dn, db);
                        db->db_dnode_handle = NULL;
                }
                dbuf_hash_remove(db);
        }
        db->db_parent = NULL;
        db->db_buf = NULL;

        ASSERT(!list_link_active(&db->db_link));
        ASSERT(db->db.db_data == NULL);
        ASSERT(db->db_hash_next == NULL);
        ASSERT(db->db_blkptr == NULL);
        ASSERT(db->db_data_pending == NULL);

        kmem_cache_free(dbuf_cache, db);
        arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
}

void
dbuf_prefetch(dnode_t *dn, uint64_t blkid)
{
        dmu_buf_impl_t *db = NULL;
        blkptr_t *bp = NULL;

        ASSERT(blkid != DMU_BONUS_BLKID);
        ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

        if (dnode_block_freed(dn, blkid))
                return;

        /* dbuf_find() returns with db_mtx held */
        if (db = dbuf_find(dn, 0, blkid)) {
                /*
                 * This dbuf is already in the cache.  We assume that
                 * it is already CACHED, or else about to be either
                 * read or filled.
                 */
                mutex_exit(&db->db_mtx);
                return;
        }

        if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
                if (bp && !BP_IS_HOLE(bp)) {
                        int priority = dn->dn_type == DMU_OT_DDT_ZAP ?
                            ZIO_PRIORITY_DDT_PREFETCH : ZIO_PRIORITY_ASYNC_READ;
                        dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
                        uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
                        zbookmark_t zb;

                        SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
                            dn->dn_object, 0, blkid);

                        (void) arc_read(NULL, dn->dn_objset->os_spa,
                            bp, NULL, NULL, priority,
                            ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
                            &aflags, &zb);
                }
                if (db)
                        dbuf_rele(db, NULL);
        }
}
/*
 * Returns with db_holds incremented, and db_mtx not held.
 * Note: dn_struct_rwlock must be held.
 */
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
    void *tag, dmu_buf_impl_t **dbp)
{
	dmu_buf_impl_t *db, *parent = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT3U(dn->dn_nlevels, >, level);

	*dbp = NULL;
top:
	/* dbuf_find() returns with db_mtx held */
	db = dbuf_find(dn, level, blkid);

	if (db == NULL) {
		blkptr_t *bp = NULL;
		int err;

		ASSERT3P(parent, ==, NULL);
		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
		if (fail_sparse) {
			if (err == 0 && bp && BP_IS_HOLE(bp))
				err = SET_ERROR(ENOENT);
			if (err) {
				if (parent)
					dbuf_rele(parent, NULL);
				return (err);
			}
		}
		if (err && err != ENOENT)
			return (err);
		db = dbuf_create(dn, level, blkid, parent, bp);
	}

	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
		arc_buf_add_ref(db->db_buf, db);
		if (db->db_buf->b_data == NULL) {
			dbuf_clear(db);
			if (parent) {
				dbuf_rele(parent, NULL);
				parent = NULL;
			}
			goto top;
		}
		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
	}

	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));

	/*
	 * If this buffer is currently syncing out, and we are
	 * still referencing it from db_data, we need to make a copy
	 * of it in case we decide we want to dirty it again in this txg.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    db->db_state == DB_CACHED && db->db_data_pending) {
		dbuf_dirty_record_t *dr = db->db_data_pending;

		if (dr->dt.dl.dr_data == db->db_buf) {
			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

			dbuf_set_data(db,
			    arc_buf_alloc(dn->dn_objset->os_spa,
			    db->db.db_size, db, type));
			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
			    db->db.db_size);
		}
	}

	(void) refcount_add(&db->db_holds, tag);
	dbuf_update_data(db);
	DBUF_VERIFY(db);
	mutex_exit(&db->db_mtx);

	/* NOTE: we can't rele the parent until after we drop the db_mtx */
	if (parent)
		dbuf_rele(parent, NULL);

	ASSERT3P(DB_DNODE(db), ==, dn);
	ASSERT3U(db->db_blkid, ==, blkid);
	ASSERT3U(db->db_level, ==, level);
	*dbp = db;

	return (0);
}

dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
	dmu_buf_impl_t *db;
	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
	return (err ? NULL : db);
}

void
dbuf_create_bonus(dnode_t *dn)
{
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	ASSERT(dn->dn_bonus == NULL);
	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}

int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	if (db->db_blkid != DMU_SPILL_BLKID)
		return (SET_ERROR(ENOTSUP));
	if (blksz == 0)
		blksz = SPA_MINBLOCKSIZE;
	if (blksz > SPA_MAXBLOCKSIZE)
		blksz = SPA_MAXBLOCKSIZE;
	else
		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dbuf_new_size(db, blksz, tx);
	rw_exit(&dn->dn_struct_rwlock);
	DB_DNODE_EXIT(db);

	return (0);
}

void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}

#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds = refcount_add(&db->db_holds, tag);
	ASSERT(holds > 1);
}

/*
 * If you call dbuf_rele() you had better not be referencing the dnode handle
 * unless you have some other direct or indirect hold on the dnode.  (An
 * indirect hold is a hold on one of the dnode's dbufs, including the bonus
 * buffer.)  Without that, the dbuf_rele() could lead to a dnode_rele()
 * followed by the dnode's parent dbuf evicting its dnode handles.
 */
#pragma weak dmu_buf_rele = dbuf_rele
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
	mutex_enter(&db->db_mtx);
	dbuf_rele_and_unlock(db, tag);
}

/*
 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
 * db_dirtycnt and db_holds to be updated atomically.
 */
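/*
 * Dropping the last hold may evict or destroy the dbuf below, so the
 * caller must not touch the dbuf after this function returns.
 */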
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
	int64_t holds;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	DBUF_VERIFY(db);

	/*
	 * Remove the reference to the dbuf before removing its hold on the
	 * dnode so we can guarantee in dnode_move() that a referenced bonus
	 * buffer has a corresponding dnode hold.
	 */
	holds = refcount_remove(&db->db_holds, tag);
	ASSERT(holds >= 0);

	/*
	 * We can't freeze indirects if there is a possibility that they
	 * may be modified in the current syncing context.
	 */
	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
		arc_buf_freeze(db->db_buf);

	if (holds == db->db_dirtycnt &&
	    db->db_level == 0 && db->db_immediate_evict)
		dbuf_evict_user(db);

	if (holds == 0) {
		if (db->db_blkid == DMU_BONUS_BLKID) {
			mutex_exit(&db->db_mtx);

			/*
			 * If the dnode moves here, we cannot cross this
			 * barrier until the move completes.
			 */
			DB_DNODE_ENTER(db);
			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
			DB_DNODE_EXIT(db);
			/*
			 * The bonus buffer's dnode hold is no longer
			 * discounted in dnode_move().  The dnode cannot move
			 * until after the dnode_rele().
			 */
			dnode_rele(DB_DNODE(db), db);
		} else if (db->db_buf == NULL) {
			/*
			 * This is a special case: we never associated this
			 * dbuf with any data allocated from the ARC.
			 */
			ASSERT(db->db_state == DB_UNCACHED ||
			    db->db_state == DB_NOFILL);
			dbuf_evict(db);
		} else if (arc_released(db->db_buf)) {
			arc_buf_t *buf = db->db_buf;
			/*
			 * This dbuf has anonymous data associated with it.
			 */
			dbuf_set_data(db, NULL);
			VERIFY(arc_buf_remove_ref(buf, db));
			dbuf_evict(db);
		} else {
			VERIFY(!arc_buf_remove_ref(db->db_buf, db));

			/*
			 * A dbuf will be eligible for eviction if either the
			 * 'primarycache' property is set or a duplicate
			 * copy of this buffer is already cached in the arc.
			 *
			 * In the case of the 'primarycache' property, a
			 * buffer is considered for eviction if it matches
			 * the criteria set in the property.
			 *
			 * To decide if our buffer is considered a
			 * duplicate, we must call into the arc to determine
			 * if multiple buffers are referencing the same
			 * block on-disk.  If so, then we simply evict
			 * ourselves.
			 */
			if (!DBUF_IS_CACHEABLE(db) ||
			    arc_buf_eviction_needed(db->db_buf))
				dbuf_clear(db);
			else
				mutex_exit(&db->db_mtx);
		}
	} else {
		mutex_exit(&db->db_mtx);
	}
}
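/*
 * Returns the number of holds currently placed on this dbuf.
 */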
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
	return (refcount_count(&db->db_holds));
}

void *
dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
    dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_immediate_evict = TRUE;
	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
	    user_data_ptr_ptr, evict_func));
}

void *
dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(db->db_level == 0);

	ASSERT((user_ptr == NULL) == (evict_func == NULL));

	mutex_enter(&db->db_mtx);

	if (db->db_user_ptr == old_user_ptr) {
		db->db_user_ptr = user_ptr;
		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
		db->db_evict_func = evict_func;

		dbuf_update_data(db);
	} else {
		old_user_ptr = db->db_user_ptr;
	}

	mutex_exit(&db->db_mtx);
	return (old_user_ptr);
}

void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	ASSERT(!refcount_is_zero(&db->db_holds));

	return (db->db_user_ptr);
}

boolean_t
dmu_buf_freeable(dmu_buf_t *dbuf)
{
	boolean_t res = B_FALSE;
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;

	if (db->db_blkptr)
		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
		    db->db_blkptr, db->db_blkptr->blk_birth);

	return (res);
}

blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
	return (dbi->db_blkptr);
}
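/*
 * Make sure db_blkptr is set: hook the dbuf up to the blkptr array in
 * the dnode's phys, or to the appropriate slot in its parent indirect
 * block, if it isn't hooked up already.
 */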
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
	/* ASSERT(dmu_tx_is_syncing(tx)) */
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_blkptr != NULL)
		return;

	if (db->db_blkid == DMU_SPILL_BLKID) {
		db->db_blkptr = &dn->dn_phys->dn_spill;
		BP_ZERO(db->db_blkptr);
		return;
	}
	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
		/*
		 * This buffer was allocated at a time when there were
		 * no available blkptrs from the dnode, or it was
		 * inappropriate to hook it in (i.e., an nlevels mismatch).
		 */
		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
		ASSERT(db->db_parent == NULL);
		db->db_parent = dn->dn_dbuf;
		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
		DBUF_VERIFY(db);
	} else {
		dmu_buf_impl_t *parent = db->db_parent;
		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;

		ASSERT(dn->dn_phys->dn_nlevels > 1);
		if (parent == NULL) {
			mutex_exit(&db->db_mtx);
			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			(void) dbuf_hold_impl(dn, db->db_level+1,
			    db->db_blkid >> epbs, FALSE, db, &parent);
			rw_exit(&dn->dn_struct_rwlock);
			mutex_enter(&db->db_mtx);
			db->db_parent = parent;
		}
		db->db_blkptr = (blkptr_t *)parent->db.db_data +
		    (db->db_blkid & ((1ULL << epbs) - 1));
		DBUF_VERIFY(db);
	}
}

static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	zio_t *zio;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);

	ASSERT(db->db_level > 0);
	DBUF_VERIFY(db);

	/* Read the block if it hasn't been read yet. */
	if (db->db_buf == NULL) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
		mutex_enter(&db->db_mtx);
	}
	ASSERT3U(db->db_state, ==, DB_CACHED);
	ASSERT(db->db_buf != NULL);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/* Indirect block size must match what the dnode thinks it is. */
	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
	dbuf_check_blkptr(dn, db);
	DB_DNODE_EXIT(db);

	/* Provide the pending dirty record to child dbufs */
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);
	dbuf_write(dr, db->db_buf, tx);

	zio = dr->dr_zio;
	mutex_enter(&dr->dt.di.dr_mtx);
	dbuf_sync_list(&dr->dt.di.dr_children, tx);
	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
	mutex_exit(&dr->dt.di.dr_mtx);
	zio_nowait(zio);
}
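/*
 * Sync a dirty level-0 (leaf) buffer out to disk for this txg.
 */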
static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
	arc_buf_t **datap = &dr->dt.dl.dr_data;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	uint64_t txg = tx->tx_txg;

	ASSERT(dmu_tx_is_syncing(tx));

	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);

	mutex_enter(&db->db_mtx);
	/*
	 * To be synced, we must be dirtied.  But we
	 * might have been freed after the dirty.
	 */
	if (db->db_state == DB_UNCACHED) {
		/* This buffer has been freed since it was dirtied */
		ASSERT(db->db.db_data == NULL);
	} else if (db->db_state == DB_FILL) {
		/* This buffer was freed and is now being re-filled */
		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
	} else {
		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
	}
	DBUF_VERIFY(db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
		mutex_exit(&dn->dn_mtx);
	}

	/*
	 * If this is a bonus buffer, simply copy the bonus data into the
	 * dnode.  It will be written out when the dnode is synced (and it
	 * will be synced, since it must have been dirty for dbuf_sync to
	 * be called).
	 */
	if (db->db_blkid == DMU_BONUS_BLKID) {
		dbuf_dirty_record_t **drp;

		ASSERT(*datap != NULL);
		ASSERT0(db->db_level);
		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
		DB_DNODE_EXIT(db);

		if (*datap != db->db.db_data) {
			zio_buf_free(*datap, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db_data_pending = NULL;
		drp = &db->db_last_dirty;
		while (*drp != dr)
			drp = &(*drp)->dr_next;
		ASSERT(dr->dr_next == NULL);
		ASSERT(dr->dr_dbuf == db);
		*drp = dr->dr_next;
		if (dr->dr_dbuf->db_level != 0) {
			list_destroy(&dr->dt.di.dr_children);
			mutex_destroy(&dr->dt.di.dr_mtx);
		}
		kmem_free(dr, sizeof (dbuf_dirty_record_t));
		ASSERT(db->db_dirtycnt > 0);
		db->db_dirtycnt -= 1;
		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
		return;
	}

	os = dn->dn_objset;

	/*
	 * This function may have dropped the db_mtx lock allowing a dmu_sync
	 * operation to sneak in.  As a result, we need to ensure that we
	 * don't check the dr_override_state until we have returned from
	 * dbuf_check_blkptr.
	 */
	dbuf_check_blkptr(dn, db);

	/*
	 * If this buffer is in the middle of an immediate write,
	 * wait for the synchronous IO to complete.
	 */
	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
		cv_wait(&db->db_changed, &db->db_mtx);
		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
	}

	if (db->db_state != DB_NOFILL &&
	    dn->dn_object != DMU_META_DNODE_OBJECT &&
	    refcount_count(&db->db_holds) > 1 &&
	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
	    *datap == db->db_buf) {
		/*
		 * If this buffer is currently "in use" (i.e., there
		 * are active holds and db_data still references it),
		 * then make a copy before we start the write so that
		 * any modifications from the open txg will not leak
		 * into this write.
		 *
		 * NOTE: this copy does not need to be made for
		 * objects only modified in the syncing context (e.g.
		 * DMU_OT_DNODE blocks).
		 */
		int blksz = arc_buf_size(*datap);
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
		bcopy(db->db.db_data, (*datap)->b_data, blksz);
	}
	db->db_data_pending = dr;

	mutex_exit(&db->db_mtx);

	dbuf_write(dr, *datap, tx);

	ASSERT(!list_link_active(&dr->dr_dirty_node));
	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
		DB_DNODE_EXIT(db);
	} else {
		/*
		 * Although zio_nowait() does not "wait for an IO", it does
		 * initiate the IO.  If this is an empty write it seems
		 * plausible that the IO could actually be completed before
		 * the nowait returns.  We need to DB_DNODE_EXIT() first in
		 * case zio_nowait() invalidates the dbuf.
		 */
		DB_DNODE_EXIT(db);
		zio_nowait(dr->dr_zio);
	}
}

void
dbuf_sync_list(list_t *list, dmu_tx_t *tx)
{
	dbuf_dirty_record_t *dr;

	while ((dr = list_head(list)) != NULL) {
		if (dr->dr_zio != NULL) {
			/*
			 * If we find an already initialized zio then we
			 * are processing the meta-dnode, and we have
			 * finished.  The dbufs for all dnodes are put back
			 * on the list during processing, so that we can
			 * zio_wait() these IOs after initiating all child
			 * IOs.
			 */
			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
			    DMU_META_DNODE_OBJECT);
			break;
		}
		list_remove(list, dr);
		if (dr->dr_dbuf->db_level > 0)
			dbuf_sync_indirect(dr, tx);
		else
			dbuf_sync_leaf(dr, tx);
	}
}
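/*
 * ZIO "ready" callback for a dbuf write: update the dnode's space
 * accounting for the new block pointer and recompute its fill count.
 */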
/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	dnode_t *dn;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	spa_t *spa = zio->io_spa;
	int64_t delta;
	uint64_t fill = 0;
	int i;

	ASSERT(db->db_blkptr == bp);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
	zio->io_prev_space_delta = delta;

	if (BP_IS_HOLE(bp)) {
		ASSERT(bp->blk_fill == 0);
		DB_DNODE_EXIT(db);
		return;
	}

	ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_type) ||
	    (db->db_blkid == DMU_SPILL_BLKID &&
	    BP_GET_TYPE(bp) == dn->dn_bonustype));
	ASSERT(BP_GET_LEVEL(bp) == db->db_level);

	mutex_enter(&db->db_mtx);

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
	}
#endif

	if (db->db_level == 0) {
		mutex_enter(&dn->dn_mtx);
		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
		    db->db_blkid != DMU_SPILL_BLKID)
			dn->dn_phys->dn_maxblkid = db->db_blkid;
		mutex_exit(&dn->dn_mtx);

		if (dn->dn_type == DMU_OT_DNODE) {
			dnode_phys_t *dnp = db->db.db_data;
			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
			    i--, dnp++) {
				if (dnp->dn_type != DMU_OT_NONE)
					fill++;
			}
		} else {
			fill = 1;
		}
	} else {
		blkptr_t *ibp = db->db.db_data;
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
			if (BP_IS_HOLE(ibp))
				continue;
			fill += ibp->blk_fill;
		}
	}
	DB_DNODE_EXIT(db);

	bp->blk_fill = fill;

	mutex_exit(&db->db_mtx);
}
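/*
 * ZIO "done" callback for a dbuf write: finish block accounting, unlink
 * and free the dirty record, and drop the hold that was taken when the
 * buffer was dirtied.
 */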
/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;
	blkptr_t *bp = zio->io_bp;
	blkptr_t *bp_orig = &zio->io_bp_orig;
	uint64_t txg = zio->io_txg;
	dbuf_dirty_record_t **drp, *dr;

	ASSERT0(zio->io_error);
	ASSERT(db->db_blkptr == bp);

	/*
	 * For nopwrites and rewrites we ensure that the bp matches our
	 * original and bypass all the accounting.
	 */
	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
		ASSERT(BP_EQUAL(bp, bp_orig));
	} else {
		objset_t *os;
		dsl_dataset_t *ds;
		dmu_tx_t *tx;

		DB_GET_OBJSET(&os, db);
		ds = os->os_dsl_dataset;
		tx = os->os_synctx;

		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
		dsl_dataset_block_born(ds, bp, tx);
	}

	mutex_enter(&db->db_mtx);

	DBUF_VERIFY(db);

	drp = &db->db_last_dirty;
	while ((dr = *drp) != db->db_data_pending)
		drp = &dr->dr_next;
	ASSERT(!list_link_active(&dr->dr_dirty_node));
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);
	ASSERT(dr->dr_next == NULL);
	*drp = dr->dr_next;

#ifdef ZFS_DEBUG
	if (db->db_blkid == DMU_SPILL_BLKID) {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
		    db->db_blkptr == &dn->dn_phys->dn_spill);
		DB_DNODE_EXIT(db);
	}
#endif

	if (db->db_level == 0) {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
		if (db->db_state != DB_NOFILL) {
			if (dr->dt.dl.dr_data != db->db_buf)
				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
				    db));
			else if (!arc_released(db->db_buf))
				arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
	} else {
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
		if (!BP_IS_HOLE(db->db_blkptr)) {
			int epbs =
			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
			    db->db.db_size);
			ASSERT3U(dn->dn_phys->dn_maxblkid
			    >> (db->db_level * epbs), >=, db->db_blkid);
			arc_set_callback(db->db_buf, dbuf_do_evict, db);
		}
		DB_DNODE_EXIT(db);
		mutex_destroy(&dr->dt.di.dr_mtx);
		list_destroy(&dr->dt.di.dr_children);
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	cv_broadcast(&db->db_changed);
	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;
	db->db_data_pending = NULL;
	dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
}

static void
dbuf_write_nofill_ready(zio_t *zio)
{
	dbuf_write_ready(zio, NULL, zio->io_private);
}

static void
dbuf_write_nofill_done(zio_t *zio)
{
	dbuf_write_done(zio, NULL, zio->io_private);
}

static void
dbuf_write_override_ready(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;

	dbuf_write_ready(zio, NULL, db);
}

static void
dbuf_write_override_done(zio_t *zio)
{
	dbuf_dirty_record_t *dr = zio->io_private;
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;

	mutex_enter(&db->db_mtx);
	if (!BP_EQUAL(zio->io_bp, obp)) {
		if (!BP_IS_HOLE(obp))
			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
		arc_release(dr->dt.dl.dr_data, db);
	}
	mutex_exit(&db->db_mtx);

	dbuf_write_done(zio, NULL, db);
}

/* Issue I/O to commit a dirty buffer to disk. */
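/*
 * The write is chained onto the parent indirect block's zio (or the
 * dnode's zio when there is no indirect parent), so a parent's I/O
 * completes only after all of its children's I/Os do.  Overridden
 * (dmu_sync) writes, NOFILL writes, and ordinary ARC writes each take
 * a different branch below.
 */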
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	dnode_t *dn;
	objset_t *os;
	dmu_buf_impl_t *parent = db->db_parent;
	uint64_t txg = tx->tx_txg;
	zbookmark_t zb;
	zio_prop_t zp;
	zio_t *zio;
	int wp_flag = 0;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	os = dn->dn_objset;

	if (db->db_state != DB_NOFILL) {
		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
			/*
			 * Private object buffers are released here rather
			 * than in dbuf_dirty() since they are only modified
			 * in the syncing context and we don't want the
			 * overhead of making multiple copies of the data.
			 */
			if (BP_IS_HOLE(db->db_blkptr)) {
				arc_buf_thaw(data);
			} else {
				dbuf_release_bp(db);
			}
		}
	}

	if (parent != dn->dn_dbuf) {
		/* Our parent is an indirect block. */
		/* We have a dirty parent that has been scheduled for write. */
		ASSERT(parent && parent->db_data_pending);
		/* Our parent's buffer is one level closer to the dnode. */
		ASSERT(db->db_level == parent->db_level-1);
		/*
		 * We're about to modify our parent's db_data by modifying
		 * our block pointer, so the parent must be released.
		 */
		ASSERT(arc_released(parent->db_buf));
		zio = parent->db_data_pending->dr_zio;
	} else {
		/* Our parent is the dnode itself. */
		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
		    db->db_blkid != DMU_SPILL_BLKID) ||
		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
		if (db->db_blkid != DMU_SPILL_BLKID)
			ASSERT3P(db->db_blkptr, ==,
			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		zio = dn->dn_zio;
	}

	ASSERT(db->db_level == 0 || data == db->db_buf);
	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
	ASSERT(zio);

	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	if (db->db_blkid == DMU_SPILL_BLKID)
		wp_flag = WP_SPILL;
	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;

	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
	DB_DNODE_EXIT(db);

	if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
		ASSERT(db->db_state != DB_NOFILL);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, data->b_data, arc_buf_size(data), &zp,
		    dbuf_write_override_ready, dbuf_write_override_done, dr,
		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
		mutex_enter(&db->db_mtx);
		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
		mutex_exit(&db->db_mtx);
	} else if (db->db_state == DB_NOFILL) {
		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
		dr->dr_zio = zio_write(zio, os->os_spa, txg,
		    db->db_blkptr, NULL, db->db.db_size, &zp,
		    dbuf_write_nofill_ready, dbuf_write_nofill_done, db,
		    ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
	} else {
		ASSERT(arc_released(data));
		dr->dr_zio = arc_write(zio, os->os_spa, txg,
		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
		    ZIO_FLAG_MUSTSUCCEED, &zb);
	}
}