dbuf.c revision 268649
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>

/*
 * Number of times that zfs_free_range() took the slow path while doing
 * a zfs receive.  A nonzero value indicates a potential performance problem.
 */
uint64_t zfs_free_range_recv_miss;

static void dbuf_destroy(dmu_buf_impl_t *db);
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_cache;

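/*
 * kmem cache constructor: zero the dbuf and initialize its embedded
 * mutex, condition variable, and hold refcount.
 */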
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	refcount_create(&db->db_holds);
	return (0);
}

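/*
 * kmem cache destructor: tear down the state set up by dbuf_cons().
 */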
/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	cv_destroy(&db->db_changed);
	refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

static uint64_t dbuf_hash_count;

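/*
 * Hash an (objset, object, level, blkid) tuple into the dbuf hash table,
 * mixing the inputs through the ZFS CRC-64 table.
 */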
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	uintptr_t osv = (uintptr_t)os;
	uint64_t crc = -1ULL;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
	crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];

	crc ^= (osv >> 14) ^ (obj >> 16) ^ (blkid >> 16);

	return (crc);
}

#define	DBUF_HASH(os, obj, level, blkid) dbuf_hash(os, obj, level, blkid)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

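/*
 * Look up a dbuf in the hash table.  If found (and not being evicted),
 * the dbuf is returned with db_mtx held; otherwise NULL is returned.
 */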
dmu_buf_impl_t *
dbuf_find(dnode_t *dn, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = dn->dn_objset;
	uint64_t obj = dn->dn_object;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *db;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL (and the new element's db_mtx is left held).
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid = db->db_blkid;
	uint64_t hv = DBUF_HASH(os, obj, level, blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx]; dbf != NULL; dbf = dbf->db_hash_next) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, 1);

	return (NULL);
}

/*
 * Remove an entry from the hash table.  This operation will
 * fail if there are any existing holds on the db.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv = DBUF_HASH(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	uint64_t idx = hv & h->hash_table_mask;
	dmu_buf_impl_t *dbf, **dbp;

	/*
	 * We mustn't hold db_mtx, to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_add_64(&dbuf_hash_count, -1);
}

static arc_evict_func_t dbuf_do_evict;

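/*
 * Invoke the dbuf's user eviction callback, if one is registered,
 * and clear the associated user state.
 */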
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (db->db_level != 0 || db->db_evict_func == NULL)
		return;

	if (db->db_user_data_ptr_ptr)
		*db->db_user_data_ptr_ptr = db->db.db_data;
	db->db_evict_func(&db->db, db->db_user_ptr);
	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
}

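/*
 * A dbuf holds metadata if it is an indirect block, or if its dnode's
 * type is one of the DMU metadata types.
 */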
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	if (db->db_level > 0) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}

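/*
 * Evict a dbuf that has no ARC buffer and no pending write:
 * clear it and then destroy it.
 */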
void
dbuf_evict(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_data_pending == NULL);

	dbuf_clear(db);
	dbuf_destroy(db);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 4K block size.  The table will take up
	 * totalmem*sizeof(void*)/4K (i.e. 2MB/GB with 8-byte pointers).
	 */
	while (hsize * 4096 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;

retry:
	h->hash_table_mask = hsize - 1;
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
	kmem_cache_destroy(dbuf_cache);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !list_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
		ASSERT(dr->dr_dbuf == db);

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb = db->db_parent->db.db_size >> SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the struct_rwlock.  XXX indblksz no longer
			 * grows.  safe to do this now?
			 */
			if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 */
		if (db->db_dirtycnt == 0) {
			uint64_t *buf = db->db.db_data;
			int i;

			for (i = 0; i < db->db.db_size >> 3; i++) {
				ASSERT(buf[i] == 0);
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

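/*
 * Refresh the caller's cached data pointer (db_user_data_ptr_ptr),
 * if registered, so that it tracks the dbuf's current data buffer.
 */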
static void
dbuf_update_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_level == 0 && db->db_user_data_ptr_ptr) {
		ASSERT(!refcount_is_zero(&db->db_holds));
		*db->db_user_data_ptr_ptr = db->db.db_data;
	}
}

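/*
 * Attach the given ARC buffer to the dbuf (registering the eviction
 * callback if needed), or detach the current buffer when buf is NULL.
 */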
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_buf == NULL || !arc_has_callback(db->db_buf));
	db->db_buf = buf;
	if (buf != NULL) {
		ASSERT(buf->b_data != NULL);
		db->db.db_data = buf->b_data;
		if (!arc_released(buf))
			arc_set_callback(buf, dbuf_do_evict, db);
		dbuf_update_data(db);
	} else {
		dbuf_evict_user(db);
		db->db.db_data = NULL;
		if (db->db_state != DB_NOFILL)
			db->db_state = DB_UNCACHED;
	}
}

/*
 * Loan out an arc_buf for read.  Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		dbuf_set_data(db, NULL);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

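/*
 * Return the level-0 block number that contains the given byte offset
 * within the object.
 */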
uint64_t
dbuf_whichblock(dnode_t *dn, uint64_t offset)
{
	if (dn->dn_datablkshift) {
		return (offset >> dn->dn_datablkshift);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}

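/*
 * ARC read-completion callback: on success install the buffer and mark
 * the dbuf CACHED; on failure drop the buffer and return to UNCACHED.
 */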
static void
dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (db->db_level == 0 && db->db_freed_in_flight) {
		/* we were freed in flight; disregard any error */
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else if (zio == NULL || zio->io_error == 0) {
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
	} else {
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		db->db_state = DB_UNCACHED;
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL);
}

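/*
 * Issue the actual read for a dbuf: bonus buffers and holes are
 * satisfied from memory, everything else starts an ARC read.
 * Called with db_mtx held; drops it before returning.
 */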
static void
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t *flags)
{
	dnode_t *dn;
	zbookmark_t zb;
	uint32_t aflags = ARC_NOWAIT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!refcount_is_zero(&db->db_holds));
	/* We need the struct_rwlock to prevent db_blkptr from changing. */
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);

	if (db->db_blkid == DMU_BONUS_BLKID) {
		int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);

		ASSERT3U(bonuslen, <=, db->db.db_size);
		db->db.db_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		if (bonuslen < DN_MAX_BONUSLEN)
			bzero(db->db.db_data, DN_MAX_BONUSLEN);
		if (bonuslen)
			bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
		DB_DNODE_EXIT(db);
		dbuf_update_data(db);
		db->db_state = DB_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	/*
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
	    (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
	    BP_IS_HOLE(db->db_blkptr)))) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);

		DB_DNODE_EXIT(db);
		dbuf_set_data(db, arc_buf_alloc(db->db_objset->os_spa,
		    db->db.db_size, db, type));
		bzero(db->db.db_data, db->db.db_size);
		db->db_state = DB_CACHED;
		*flags |= DB_RF_CACHED;
		mutex_exit(&db->db_mtx);
		return;
	}

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_L2CACHE;
	if (DBUF_IS_L2COMPRESSIBLE(db))
		aflags |= ARC_L2COMPRESS;

	SET_BOOKMARK(&zb, db->db_objset->os_dsl_dataset ?
	    db->db_objset->os_dsl_dataset->ds_object : DMU_META_OBJSET,
	    db->db.db_object, db->db_level, db->db_blkid);

	dbuf_add_ref(db, NULL);

	(void) arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ,
	    (*flags & DB_RF_CANFAIL) ? ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED,
	    &aflags, &zb);
	if (aflags & ARC_CACHED)
		*flags |= DB_RF_CACHED;
}

int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
	int err = 0;
	boolean_t havepzio = (zio != NULL);
	boolean_t prefetch;
	dnode_t *dn;

	/*
	 * We don't have to hold the mutex to check db_state because it
	 * can't be freed while we have a hold on the buffer.
	 */
	ASSERT(!refcount_is_zero(&db->db_holds));

	if (db->db_state == DB_NOFILL)
		return (SET_ERROR(EIO));

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
	    DBUF_IS_CACHEABLE(db);

	mutex_enter(&db->db_mtx);
	if (db->db_state == DB_CACHED) {
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);
	} else if (db->db_state == DB_UNCACHED) {
		spa_t *spa = dn->dn_objset->os_spa;

		if (zio == NULL)
			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
		dbuf_read_impl(db, zio, &flags);

		/* dbuf_read_impl has dropped db_mtx for us */

		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, flags & DB_RF_CACHED);

		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		if (!havepzio)
			err = zio_wait(zio);
	} else {
		/*
		 * Another reader came in while the dbuf was in flight
		 * between UNCACHED and CACHED.  Either a writer will finish
		 * writing the buffer (sending the dbuf to CACHED) or the
		 * first reader's request will reach the read_done callback
		 * and send the dbuf to CACHED.  Otherwise, a failure
		 * occurred and the dbuf went to UNCACHED.
		 */
		mutex_exit(&db->db_mtx);
		if (prefetch)
			dmu_zfetch(&dn->dn_zfetch, db->db.db_offset,
			    db->db.db_size, TRUE);
		if ((flags & DB_RF_HAVESTRUCT) == 0)
			rw_exit(&dn->dn_struct_rwlock);
		DB_DNODE_EXIT(db);

		/* Skip the wait per the caller's request. */
		mutex_enter(&db->db_mtx);
		if ((flags & DB_RF_NEVERWAIT) == 0) {
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL) {
				ASSERT(db->db_state == DB_READ ||
				    (flags & DB_RF_HAVESTRUCT) == 0);
				cv_wait(&db->db_changed, &db->db_mtx);
			}
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
		}
		mutex_exit(&db->db_mtx);
	}

	ASSERT(err || havepzio || db->db_state == DB_CACHED);
	return (err);
}

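/*
 * Prepare the dbuf to be entirely overwritten without first reading
 * in the existing block contents.
 */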
static void
dbuf_noread(dmu_buf_impl_t *db)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);
	if (db->db_state == DB_UNCACHED) {
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		ASSERT(db->db_buf == NULL);
		ASSERT(db->db.db_data == NULL);
		dbuf_set_data(db, arc_buf_alloc(spa, db->db.db_size, db, type));
		db->db_state = DB_FILL;
	} else if (db->db_state == DB_NOFILL) {
		dbuf_set_data(db, NULL);
	} else {
		ASSERT3U(db->db_state, ==, DB_CACHED);
	}
	mutex_exit(&db->db_mtx);
}

/*
 * This is our just-in-time copy function.  It makes a copy of buffers
 * that have been modified in a previous transaction group before we
 * modify them in the current active group.
 *
 * This function is used in two places: when we are dirtying a
 * buffer for the first time in a txg, and when we are freeing
 * a range in a dnode that includes this buffer.
 *
 * Note that when we are called from dbuf_free_range() we do
 * not put a hold on the buffer; we just traverse the active
 * dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = db->db_last_dirty;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it's referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
	ASSERT(dr->dr_txg >= txg - 2);
	if (db->db_blkid == DMU_BONUS_BLKID) {
		/* Note that the data bufs here are zio_bufs */
		dr->dt.dl.dr_data = zio_buf_alloc(DN_MAX_BONUSLEN);
		arc_space_consume(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		bcopy(db->db.db_data, dr->dt.dl.dr_data, DN_MAX_BONUSLEN);
	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
		int size = db->db.db_size;
		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
		spa_t *spa = db->db_objset->os_spa;

		dr->dt.dl.dr_data = arc_buf_alloc(spa, size, db, type);
		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
	} else {
		dbuf_set_data(db, NULL);
	}
}

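/*
 * Undo a dmu_sync() override: free the block it wrote (unless it was a
 * nopwrite), and release the buffer back to a normal dirty state.
 */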
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
	dmu_buf_impl_t *db = dr->dr_dbuf;
	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
	uint64_t txg = dr->dr_txg;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
	ASSERT(db->db_level == 0);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
		return;

	ASSERT(db->db_data_pending != dr);

	/* free this block */
	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
		zio_free(db->db_objset->os_spa, txg, bp);

	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
	dr->dt.dl.dr_nopwrite = B_FALSE;

	/*
	 * Release the already-written buffer, so we leave it in
	 * a consistent dirty state.  Note that all callers are
	 * modifying the buffer, so they will immediately do
	 * another (redundant) arc_release().  Therefore, leave
	 * the buf thawed to save the effort of freezing &
	 * immediately re-thawing it.
	 */
	arc_release(dr->dt.dl.dr_data, db);
}

/*
 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
 * data blocks in the free range, so that any future readers will find
 * empty blocks.
 *
 * This is a no-op if the dataset is in the middle of an incremental
 * receive; see comment below for details.
 */
void
dbuf_free_range(dnode_t *dn, uint64_t start, uint64_t end, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	uint64_t txg = tx->tx_txg;

	if (end > dn->dn_maxblkid && (end != DMU_SPILL_BLKID))
		end = dn->dn_maxblkid;
	dprintf_dnode(dn, "start=%llu end=%llu\n", start, end);

	mutex_enter(&dn->dn_dbufs_mtx);
	if (start >= dn->dn_unlisted_l0_blkid * dn->dn_datablksz) {
		/* There can't be any dbufs in this range; no need to search. */
		mutex_exit(&dn->dn_dbufs_mtx);
		return;
	} else if (dmu_objset_is_receiving(dn->dn_objset)) {
		/*
		 * If we are receiving, we expect there to be no dbufs in
		 * the range to be freed, because receive modifies each
		 * block at most once, and in offset order.  If this is
		 * not the case, it can lead to performance problems,
		 * so note that we unexpectedly took the slow path.
		 */
		atomic_inc_64(&zfs_free_range_recv_miss);
	}

	for (db = list_head(&dn->dn_dbufs); db != NULL; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);

		if (db->db_level != 0)
			continue;
		if (db->db_blkid < start || db->db_blkid > end)
			continue;

		/* found a level 0 buffer in the range */
		mutex_enter(&db->db_mtx);
		if (dbuf_undirty(db, tx)) {
			/* mutex has been dropped and dbuf destroyed */
			continue;
		}

		if (db->db_state == DB_UNCACHED ||
		    db->db_state == DB_NOFILL ||
		    db->db_state == DB_EVICTING) {
			ASSERT(db->db.db_data == NULL);
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
			/* will be handled in dbuf_read_done or dbuf_rele */
			db->db_freed_in_flight = TRUE;
			mutex_exit(&db->db_mtx);
			continue;
		}
		if (refcount_count(&db->db_holds) == 0) {
			ASSERT(db->db_buf);
			dbuf_clear(db);
			continue;
		}
		/* The dbuf is referenced */

		if (db->db_last_dirty != NULL) {
			dbuf_dirty_record_t *dr = db->db_last_dirty;

			if (dr->dr_txg == txg) {
				/*
				 * This buffer is "in-use", re-adjust the file
				 * size to reflect that this buffer may
				 * contain new data when we sync.
				 */
				if (db->db_blkid != DMU_SPILL_BLKID &&
				    db->db_blkid > dn->dn_maxblkid)
					dn->dn_maxblkid = db->db_blkid;
				dbuf_unoverride(dr);
			} else {
				/*
				 * This dbuf is not dirty in the open context.
				 * Either uncache it (if it's not referenced in
				 * the open context) or reset its contents to
				 * empty.
				 */
				dbuf_fix_old_data(db, txg);
			}
		}
		/* clear the contents if it's cached */
		if (db->db_state == DB_CACHED) {
			ASSERT(db->db.db_data != NULL);
			arc_release(db->db_buf, db);
			bzero(db->db.db_data, db->db.db_size);
			arc_buf_freeze(db->db_buf);
		}

		mutex_exit(&db->db_mtx);
	}
	mutex_exit(&dn->dn_dbufs_mtx);
}

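/*
 * Determine whether freeing this dbuf's current block would actually
 * free space, i.e. the block exists and is not held by a snapshot.
 */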
static int
dbuf_block_freeable(dmu_buf_impl_t *db)
{
	dsl_dataset_t *ds = db->db_objset->os_dsl_dataset;
	uint64_t birth_txg = 0;

	/*
	 * We don't need any locking to protect db_blkptr:
	 * If it's syncing, then db_last_dirty will be set
	 * so we'll ignore db_blkptr.
	 *
	 * This logic ensures that only block births for
	 * filled blocks are considered.
	 */
	ASSERT(MUTEX_HELD(&db->db_mtx));
	if (db->db_last_dirty && (db->db_blkptr == NULL ||
	    !BP_IS_HOLE(db->db_blkptr))) {
		birth_txg = db->db_last_dirty->dr_txg;
	} else if (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
		birth_txg = db->db_blkptr->blk_birth;
	}

	/*
	 * If this block doesn't exist or is in a snapshot, it can't be freed.
	 * Don't pass the bp to dsl_dataset_block_freeable() since we
	 * are holding the db_mtx lock and might deadlock if we are
	 * prefetching a dedup-ed block.
	 */
	if (birth_txg != 0)
		return (ds == NULL ||
		    dsl_dataset_block_freeable(ds, NULL, birth_txg));
	else
		return (B_FALSE);
}

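/*
 * Resize the dbuf's data buffer, copying the old contents into the new
 * buffer and zeroing any newly added tail.
 */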
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
	arc_buf_t *buf, *obuf;
	int osize = db->db.db_size;
	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
	dnode_t *dn;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	/* XXX does *this* func really need the lock? */
	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
	 * is OK, because there can be no other references to the db
	 * when we are changing its size, so no concurrent DB_FILL can
	 * be happening.
	 */
	/*
	 * XXX we should be doing a dbuf_read, checking the return
	 * value and returning that up to our callers
	 */
	dmu_buf_will_dirty(&db->db, tx);

	/* create the data buffer for the new block */
	buf = arc_buf_alloc(dn->dn_objset->os_spa, size, db, type);

	/* copy old block data to the new block */
	obuf = db->db_buf;
	bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
	/* zero the remainder */
	if (size > osize)
		bzero((uint8_t *)buf->b_data + osize, size - osize);

	mutex_enter(&db->db_mtx);
	dbuf_set_data(db, buf);
	VERIFY(arc_buf_remove_ref(obuf, db));
	db->db.db_size = size;

	if (db->db_level == 0) {
		ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
		db->db_last_dirty->dt.dl.dr_data = buf;
	}
	mutex_exit(&db->db_mtx);

	dnode_willuse_space(dn, size-osize, tx);
	DB_DNODE_EXIT(db);
}

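/*
 * Release the dbuf's ARC buffer in syncing context so that its block
 * pointer can safely be modified.
 */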
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
	objset_t *os = db->db_objset;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(arc_released(os->os_phys_buf) ||
	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));

	(void) arc_release(db->db_buf, db);
}

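/*
 * Mark the dbuf dirty in the given transaction, creating a dirty record
 * (or reusing the existing one for this txg) and recursively dirtying
 * the parent indirect block.  Returns the dirty record.
 */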
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	objset_t *os;
	dbuf_dirty_record_t **drp, *dr;
	int drop_struct_lock = FALSE;
	boolean_t do_free_accounting = B_FALSE;
	int txgoff = tx->tx_txg & TXG_MASK;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));
	DMU_TX_DIRTY_BUF(tx, db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	/*
	 * Shouldn't dirty a regular buffer in syncing context.  Private
	 * objects may be dirtied in syncing context, but only if they
	 * were already pre-dirtied in open context.
	 */
	ASSERT(!dmu_tx_is_syncing(tx) ||
	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    dn->dn_objset->os_dsl_dataset == NULL);
	/*
	 * We make this assert for private objects as well, but after we
	 * check if we're already dirty.  They are allowed to re-dirty
	 * in syncing context.
	 */
	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	mutex_enter(&db->db_mtx);
	/*
	 * XXX make this true for indirects too?  The problem is that
	 * transactions created with dmu_tx_create_assigned() from
	 * syncing context don't bother holding ahead.
	 */
	ASSERT(db->db_level != 0 ||
	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
	    db->db_state == DB_NOFILL);

	mutex_enter(&dn->dn_mtx);
	/*
	 * Don't set dirtyctx to SYNC if we're just modifying this as we
	 * initialize the objset.
	 */
	if (dn->dn_dirtyctx == DN_UNDIRTIED &&
	    !BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
		dn->dn_dirtyctx =
		    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN);
		ASSERT(dn->dn_dirtyctx_firstset == NULL);
		dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
	}
	mutex_exit(&dn->dn_mtx);

	if (db->db_blkid == DMU_SPILL_BLKID)
		dn->dn_have_spill = B_TRUE;

	/*
	 * If this buffer is already dirty, we're done.
	 */
	drp = &db->db_last_dirty;
	ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
	    db->db.db_object == DMU_META_DNODE_OBJECT);
	while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
		drp = &dr->dr_next;
	if (dr && dr->dr_txg == tx->tx_txg) {
		DB_DNODE_EXIT(db);

		if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
			/*
			 * If this buffer has already been written out,
			 * we now need to reset its state.
			 */
			dbuf_unoverride(dr);
			if (db->db.db_object != DMU_META_DNODE_OBJECT &&
			    db->db_state != DB_NOFILL)
				arc_buf_thaw(db->db_buf);
		}
		mutex_exit(&db->db_mtx);
		return (dr);
	}

	/*
	 * Only valid if not already dirty.
	 */
	ASSERT(dn->dn_object == 0 ||
	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));

	ASSERT3U(dn->dn_nlevels, >, db->db_level);
	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
	    dn->dn_phys->dn_nlevels > db->db_level ||
	    dn->dn_next_nlevels[txgoff] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);

	/*
	 * We should only be dirtying in syncing context if it's the
	 * mos or we're initializing the os or it's a special object.
	 * However, we are allowed to dirty in syncing context provided
	 * we already dirtied it in open context.  Hence we must make
	 * this assertion only if we're not already dirty.
	 */
	os = dn->dn_objset;
	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
	ASSERT(db->db.db_size != 0);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	if (db->db_blkid != DMU_BONUS_BLKID) {
		/*
		 * Update the accounting.
		 * Note: we delay "free accounting" until after we drop
		 * the db_mtx.  This keeps us from grabbing other locks
		 * (and possibly deadlocking) in bp_get_dsize() while
		 * also holding the db_mtx.
		 */
		dnode_willuse_space(dn, db->db.db_size, tx);
		do_free_accounting = dbuf_block_freeable(db);
	}

	/*
	 * If this buffer is dirty in an old transaction group we need
	 * to make a copy of it so that the changes we make in this
	 * transaction group won't leak out when we sync the older txg.
	 */
	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
	if (db->db_level == 0) {
		void *data_old = db->db_buf;

		if (db->db_state != DB_NOFILL) {
			if (db->db_blkid == DMU_BONUS_BLKID) {
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db.db_data;
			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
				/*
				 * Release the data buffer from the cache so
				 * that we can modify it without impacting
				 * possible other users of this cached data
				 * block.  Note that indirect blocks and
				 * private objects are not released until the
				 * syncing state (since they are only modified
				 * then).
				 */
				arc_release(db->db_buf, db);
				dbuf_fix_old_data(db, tx->tx_txg);
				data_old = db->db_buf;
			}
			ASSERT(data_old != NULL);
		}
		dr->dt.dl.dr_data = data_old;
	} else {
		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_DEFAULT, NULL);
		list_create(&dr->dt.di.dr_children,
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}
	if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
		dr->dr_accounted = db->db.db_size;
	dr->dr_dbuf = db;
	dr->dr_txg = tx->tx_txg;
	dr->dr_next = *drp;
	*drp = dr;

	/*
	 * We could have been freed_in_flight between the dbuf_noread
	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
	 * happened after the free.
	 */
	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_blkid != DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_free_ranges[txgoff] != NULL) {
			range_tree_clear(dn->dn_free_ranges[txgoff],
			    db->db_blkid, 1);
		}
		mutex_exit(&dn->dn_mtx);
		db->db_freed_in_flight = FALSE;
	}

	/*
	 * This buffer is now part of this txg
	 */
	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
	db->db_dirtycnt += 1;
	ASSERT3U(db->db_dirtycnt, <=, 3);

	mutex_exit(&db->db_mtx);

	if (db->db_blkid == DMU_BONUS_BLKID ||
	    db->db_blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		dnode_setdirty(dn, tx);
		DB_DNODE_EXIT(db);
		return (dr);
	} else if (do_free_accounting) {
		blkptr_t *bp = db->db_blkptr;
		int64_t willfree = (bp && !BP_IS_HOLE(bp)) ?
		    bp_get_dsize(os->os_spa, bp) : db->db.db_size;
		/*
		 * This is only a guess -- if the dbuf is dirty
		 * in a previous txg, we don't know how much
		 * space it will use on disk yet.  We should
		 * really have the struct_rwlock to access
		 * db_blkptr, but since this is just a guess,
		 * it's OK if we get an odd answer.
		 */
		ddt_prefetch(os->os_spa, bp);
		dnode_willuse_space(dn, -willfree, tx);
	}

	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		drop_struct_lock = TRUE;
	}

	if (db->db_level == 0) {
		dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
		ASSERT(dn->dn_maxblkid >= db->db_blkid);
	}

	if (db->db_level+1 < dn->dn_nlevels) {
		dmu_buf_impl_t *parent = db->db_parent;
		dbuf_dirty_record_t *di;
		int parent_held = FALSE;

		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			parent = dbuf_hold_level(dn, db->db_level+1,
			    db->db_blkid >> epbs, FTAG);
			ASSERT(parent != NULL);
			parent_held = TRUE;
		}
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
		ASSERT3U(db->db_level+1, ==, parent->db_level);
		di = dbuf_dirty(parent, tx);
		if (parent_held)
			dbuf_rele(parent, FTAG);

		mutex_enter(&db->db_mtx);
		/*
		 * Since we've dropped the mutex, it's possible that
		 * dbuf_undirty() might have changed this out from under us.
		 */
		if (db->db_last_dirty == dr ||
		    dn->dn_object == DMU_META_DNODE_OBJECT) {
			mutex_enter(&di->dt.di.dr_mtx);
			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
			ASSERT(!list_link_active(&dr->dr_dirty_node));
			list_insert_tail(&di->dt.di.dr_children, dr);
			mutex_exit(&di->dt.di.dr_mtx);
			dr->dr_parent = di;
		}
		mutex_exit(&db->db_mtx);
	} else {
		ASSERT(db->db_level+1 == dn->dn_nlevels);
		ASSERT(db->db_blkid < dn->dn_nblkptr);
		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		ASSERT(!list_link_active(&dr->dr_dirty_node));
		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
		mutex_exit(&dn->dn_mtx);
		if (drop_struct_lock)
			rw_exit(&dn->dn_struct_rwlock);
	}

	dnode_setdirty(dn, tx);
	DB_DNODE_EXIT(db);
	return (dr);
}

/*
 * Undirty a buffer in the transaction group referenced by the given
 * transaction.  Return whether this evicted the dbuf.
 */
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	dnode_t *dn;
	uint64_t txg = tx->tx_txg;
	dbuf_dirty_record_t *dr, **drp;

	ASSERT(txg != 0);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT0(db->db_level);
	ASSERT(MUTEX_HELD(&db->db_mtx));

	/*
	 * If this buffer is not dirty, we're done.
	 */
	for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
		if (dr->dr_txg <= txg)
			break;
	if (dr == NULL || dr->dr_txg < txg)
		return (B_FALSE);
	ASSERT(dr->dr_txg == txg);
	ASSERT(dr->dr_dbuf == db);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);

	ASSERT(db->db.db_size != 0);

	/*
	 * Any space we accounted for in dp_dirty_* will be cleaned up by
	 * dsl_pool_sync().  This is relatively rare so the discrepancy
	 * is not a big deal.
	 */

	*drp = dr->dr_next;

	/*
	 * Note that there are three places in dbuf_dirty()
	 * where this dirty record may be put on a list.
	 * Make sure to do a list_remove corresponding to
	 * every one of those list_insert calls.
	 */
	if (dr->dr_parent) {
		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
	} else if (db->db_blkid == DMU_SPILL_BLKID ||
	    db->db_level+1 == dn->dn_nlevels) {
		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
		mutex_enter(&dn->dn_mtx);
		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
		mutex_exit(&dn->dn_mtx);
	}
	DB_DNODE_EXIT(db);

	if (db->db_state != DB_NOFILL) {
		dbuf_unoverride(dr);

		ASSERT(db->db_buf != NULL);
		ASSERT(dr->dt.dl.dr_data != NULL);
		if (dr->dt.dl.dr_data != db->db_buf)
			VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db));
	}
	kmem_free(dr, sizeof (dbuf_dirty_record_t));

	ASSERT(db->db_dirtycnt > 0);
	db->db_dirtycnt -= 1;

	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
		arc_buf_t *buf = db->db_buf;

		ASSERT(db->db_state == DB_NOFILL || arc_released(buf));
		dbuf_set_data(db, NULL);
		VERIFY(arc_buf_remove_ref(buf, db));
		dbuf_evict(db);
		return (B_TRUE);
	}

	return (B_FALSE);
}

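/*
 * Public entry point: read the buffer in (if necessary) and mark it
 * dirty in the given transaction.
 */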
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

	ASSERT(tx->tx_txg != 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	DB_DNODE_ENTER(db);
	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
		rf |= DB_RF_HAVESTRUCT;
	DB_DNODE_EXIT(db);
	(void) dbuf_read(db, NULL, rf);
	(void) dbuf_dirty(db, tx);
}

void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	db->db_state = DB_NOFILL;

	dmu_buf_will_fill(db_fake, tx);
}

void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(tx->tx_txg != 0);
	ASSERT(db->db_level == 0);
	ASSERT(!refcount_is_zero(&db->db_holds));

	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
	    dmu_tx_private_ok(tx));

	dbuf_noread(db);
	(void) dbuf_dirty(db, tx);
}

#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
	mutex_enter(&db->db_mtx);
	DBUF_VERIFY(db);

	if (db->db_state == DB_FILL) {
		if (db->db_level == 0 && db->db_freed_in_flight) {
			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
			/* we were freed while filling */
			/* XXX dbuf_undirty? */
			bzero(db->db.db_data, db->db.db_size);
			db->db_freed_in_flight = FALSE;
		}
		db->db_state = DB_CACHED;
		cv_broadcast(&db->db_changed);
	}
	mutex_exit(&db->db_mtx);
}

void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
    bp_embedded_type_t etype, enum zio_compress comp,
    int uncompressed_size, int compressed_size, int byteorder,
    dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
	struct dirty_leaf *dl;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	ASSERT0(db->db_level);
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);

	dmu_buf_will_not_fill(dbuf, tx);

	ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
	dl = &db->db_last_dirty->dt.dl;
	encode_embedded_bp_compressed(&dl->dr_overridden_by,
	    data, comp, uncompressed_size, compressed_size);
	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
	BP_SET_TYPE(&dl->dr_overridden_by, type);
	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);

	dl->dr_override_state = DR_OVERRIDDEN;
	dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}

/*
 * Directly assign a provided arc buf to a given dbuf if it's not referenced
 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
 */
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
	ASSERT(!refcount_is_zero(&db->db_holds));
	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	ASSERT(db->db_level == 0);
	ASSERT(DBUF_GET_BUFC_TYPE(db) == ARC_BUFC_DATA);
	ASSERT(buf != NULL);
	ASSERT(arc_buf_size(buf) == db->db.db_size);
	ASSERT(tx->tx_txg != 0);

	arc_return_buf(buf, db);
	ASSERT(arc_released(buf));

	mutex_enter(&db->db_mtx);

	while (db->db_state == DB_READ || db->db_state == DB_FILL)
		cv_wait(&db->db_changed, &db->db_mtx);

	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

	if (db->db_state == DB_CACHED &&
	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
		mutex_exit(&db->db_mtx);
		(void) dbuf_dirty(db, tx);
		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
		VERIFY(arc_buf_remove_ref(buf, db));
		xuio_stat_wbuf_copied();
		return;
	}

	xuio_stat_wbuf_nocopy();
	if (db->db_state == DB_CACHED) {
		dbuf_dirty_record_t *dr = db->db_last_dirty;

		ASSERT(db->db_buf != NULL);
		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
			ASSERT(dr->dt.dl.dr_data == db->db_buf);
			if (!arc_released(db->db_buf)) {
				ASSERT(dr->dt.dl.dr_override_state ==
				    DR_OVERRIDDEN);
				arc_release(db->db_buf, db);
			}
			dr->dt.dl.dr_data = buf;
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
			arc_release(db->db_buf, db);
			VERIFY(arc_buf_remove_ref(db->db_buf, db));
		}
		db->db_buf = NULL;
	}
	ASSERT(db->db_buf == NULL);
	dbuf_set_data(db, buf);
	db->db_state = DB_FILL;
	mutex_exit(&db->db_mtx);
	(void) dbuf_dirty(db, tx);
	dmu_buf_fill_done(&db->db, tx);
}

/*
 * "Clear" the contents of this dbuf.  This will mark the dbuf
 * EVICTING and clear *most* of its references.  Unfortunately,
 * when we are not holding the dn_dbufs_mtx, we can't clear the
 * entry in the dn_dbufs list.  We have to wait until dbuf_destroy()
 * in this case.  For callers from the DMU we will usually see:
 *	dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy()
 * For the arc callback, we will usually see:
 *	dbuf_do_evict()->dbuf_clear();dbuf_destroy()
 * Sometimes, though, we will get a mix of these two:
 *	DMU: dbuf_clear()->arc_buf_evict()
 *	ARC: dbuf_do_evict()->dbuf_destroy()
 */
void
dbuf_clear(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dmu_buf_impl_t *parent = db->db_parent;
	dmu_buf_impl_t *dndb;
	int dbuf_gone = FALSE;

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(refcount_is_zero(&db->db_holds));

	dbuf_evict_user(db);

	if (db->db_state == DB_CACHED) {
		ASSERT(db->db.db_data != NULL);
		if (db->db_blkid == DMU_BONUS_BLKID) {
			zio_buf_free(db->db.db_data, DN_MAX_BONUSLEN);
			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
		}
		db->db.db_data = NULL;
		db->db_state = DB_UNCACHED;
	}

	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
	ASSERT(db->db_data_pending == NULL);

	db->db_state = DB_EVICTING;
	db->db_blkptr = NULL;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dndb = dn->dn_dbuf;
	if (db->db_blkid != DMU_BONUS_BLKID && MUTEX_HELD(&dn->dn_dbufs_mtx)) {
		list_remove(&dn->dn_dbufs, db);
		(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
		membar_producer();
		DB_DNODE_EXIT(db);
		/*
		 * Decrementing the dbuf count means that the hold corresponding
		 * to the removed dbuf is no longer discounted in dnode_move(),
		 * so the dnode cannot be moved until after we release the hold.
		 * The membar_producer() ensures visibility of the decremented
		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
		 * release any lock.
		 */
		dnode_rele(dn, db);
		db->db_dnode_handle = NULL;
	} else {
		DB_DNODE_EXIT(db);
	}

	if (db->db_buf)
		dbuf_gone = arc_buf_evict(db->db_buf);

	if (!dbuf_gone)
		mutex_exit(&db->db_mtx);

	/*
	 * If this dbuf is referenced from an indirect dbuf,
	 * decrement the ref count on the indirect dbuf.
	 */
	if (parent && parent != dndb)
		dbuf_rele(parent, db);
}

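/*
 * Locate the parent dbuf and block pointer that reference the block at
 * (level, blkid).  Returns ENOENT if the buffer has no parent yet.
 */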
static int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
    dmu_buf_impl_t **parentp, blkptr_t **bpp)
{
	int nlevels, epbs;

	*parentp = NULL;
	*bpp = NULL;

	ASSERT(blkid != DMU_BONUS_BLKID);

	if (blkid == DMU_SPILL_BLKID) {
		mutex_enter(&dn->dn_mtx);
		if (dn->dn_have_spill &&
		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
			*bpp = &dn->dn_phys->dn_spill;
		else
			*bpp = NULL;
		dbuf_add_ref(dn->dn_dbuf, NULL);
		*parentp = dn->dn_dbuf;
		mutex_exit(&dn->dn_mtx);
		return (0);
	}

	if (dn->dn_phys->dn_nlevels == 0)
		nlevels = 1;
	else
		nlevels = dn->dn_phys->dn_nlevels;

	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	ASSERT3U(level * epbs, <, 64);
	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	if (level >= nlevels ||
	    (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
		/* the buffer has no parent yet */
		return (SET_ERROR(ENOENT));
	} else if (level < nlevels-1) {
		/* this block is referenced from an indirect block */
		int err = dbuf_hold_impl(dn, level+1,
		    blkid >> epbs, fail_sparse, NULL, parentp);
		if (err)
			return (err);
		err = dbuf_read(*parentp, NULL,
		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
		if (err) {
			dbuf_rele(*parentp, NULL);
			*parentp = NULL;
			return (err);
		}
		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
		    (blkid & ((1ULL << epbs) - 1));
		return (0);
	} else {
		/* the block is referenced from the dnode */
		ASSERT3U(level, ==, nlevels-1);
		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
		    blkid < dn->dn_phys->dn_nblkptr);
		if (dn->dn_dbuf) {
			dbuf_add_ref(dn->dn_dbuf, NULL);
			*parentp = dn->dn_dbuf;
		}
		*bpp = &dn->dn_phys->dn_blkptr[blkid];
		return (0);
	}
}

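/*
 * Allocate and initialize a new dbuf, inserting it into the hash table
 * and onto the dnode's dbuf list.
 */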
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
    dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
	objset_t *os = dn->dn_objset;
	dmu_buf_impl_t *db, *odb;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
	ASSERT(dn->dn_type != DMU_OT_NONE);

	db = kmem_cache_alloc(dbuf_cache, KM_SLEEP);

	db->db_objset = os;
	db->db.db_object = dn->dn_object;
	db->db_level = level;
	db->db_blkid = blkid;
	db->db_last_dirty = NULL;
	db->db_dirtycnt = 0;
	db->db_dnode_handle = dn->dn_handle;
	db->db_parent = parent;
	db->db_blkptr = blkptr;

	db->db_user_ptr = NULL;
	db->db_user_data_ptr_ptr = NULL;
	db->db_evict_func = NULL;
	db->db_immediate_evict = 0;
	db->db_freed_in_flight = 0;

	if (blkid == DMU_BONUS_BLKID) {
		ASSERT3P(parent, ==, dn->dn_dbuf);
		db->db.db_size = DN_MAX_BONUSLEN -
		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		db->db.db_offset = DMU_BONUS_BLKID;
		db->db_state = DB_UNCACHED;
		/* the bonus dbuf is not placed in the hash table */
		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
		return (db);
	} else if (blkid == DMU_SPILL_BLKID) {
		db->db.db_size = (blkptr != NULL) ?
		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
		db->db.db_offset = 0;
	} else {
		int blocksize =
		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
		db->db.db_size = blocksize;
		db->db.db_offset = db->db_blkid * blocksize;
	}

	/*
	 * Hold the dn_dbufs_mtx while we get the new dbuf
	 * in the hash table *and* added to the dbufs list.
	 * This prevents a possible deadlock with someone
	 * trying to look up this dbuf before it's added to the
	 * dn_dbufs list.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	db->db_state = DB_EVICTING;
	if ((odb = dbuf_hash_insert(db)) != NULL) {
		/* someone else inserted it first */
		kmem_cache_free(dbuf_cache, db);
		mutex_exit(&dn->dn_dbufs_mtx);
		return (odb);
	}
	list_insert_head(&dn->dn_dbufs, db);
	if (db->db_level == 0 && db->db_blkid >=
	    dn->dn_unlisted_l0_blkid)
		dn->dn_unlisted_l0_blkid = db->db_blkid + 1;
	db->db_state = DB_UNCACHED;
	mutex_exit(&dn->dn_dbufs_mtx);
	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);

	if (parent && parent != dn->dn_dbuf)
		dbuf_add_ref(parent, db);

	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
	    refcount_count(&dn->dn_holds) > 0);
	(void) refcount_add(&dn->dn_holds, db);
	(void) atomic_inc_32_nv(&dn->dn_dbufs_count);

	dprintf_dbuf(db, "db=%p\n", db);

	return (db);
}

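/*
 * ARC eviction callback: evict or destroy the dbuf associated with the
 * buffer being evicted from the ARC.
 */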
static int
dbuf_do_evict(void *private)
{
	arc_buf_t *buf = private;
	dmu_buf_impl_t *db = buf->b_private;

	if (!MUTEX_HELD(&db->db_mtx))
		mutex_enter(&db->db_mtx);

	ASSERT(refcount_is_zero(&db->db_holds));

	if (db->db_state != DB_EVICTING) {
		ASSERT(db->db_state == DB_CACHED);
		DBUF_VERIFY(db);
		db->db_buf = NULL;
		dbuf_evict(db);
	} else {
		mutex_exit(&db->db_mtx);
		dbuf_destroy(db);
	}
	return (0);
}

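/*
 * Final teardown of a dbuf: unlink it from the dnode and the hash
 * table, then return it to the kmem cache.
 */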
1788static void
1789dbuf_destroy(dmu_buf_impl_t *db)
1790{
1791	ASSERT(refcount_is_zero(&db->db_holds));
1792
1793	if (db->db_blkid != DMU_BONUS_BLKID) {
1794		/*
1795		 * If this dbuf is still on the dn_dbufs list,
1796		 * remove it from that list.
1797		 */
1798		if (db->db_dnode_handle != NULL) {
1799			dnode_t *dn;
1800
1801			DB_DNODE_ENTER(db);
1802			dn = DB_DNODE(db);
1803			mutex_enter(&dn->dn_dbufs_mtx);
1804			list_remove(&dn->dn_dbufs, db);
1805			(void) atomic_dec_32_nv(&dn->dn_dbufs_count);
1806			mutex_exit(&dn->dn_dbufs_mtx);
1807			DB_DNODE_EXIT(db);
1808			/*
1809			 * Decrementing the dbuf count means that the hold
1810			 * corresponding to the removed dbuf is no longer
1811			 * discounted in dnode_move(), so the dnode cannot be
1812			 * moved until after we release the hold.
1813			 */
1814			dnode_rele(dn, db);
1815			db->db_dnode_handle = NULL;
1816		}
1817		dbuf_hash_remove(db);
1818	}
1819	db->db_parent = NULL;
1820	db->db_buf = NULL;
1821
1822	ASSERT(!list_link_active(&db->db_link));
1823	ASSERT(db->db.db_data == NULL);
1824	ASSERT(db->db_hash_next == NULL);
1825	ASSERT(db->db_blkptr == NULL);
1826	ASSERT(db->db_data_pending == NULL);
1827
1828	kmem_cache_free(dbuf_cache, db);
1829	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_OTHER);
1830}
1831
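/*
 * Issue an asynchronous, speculative read of a level-0 block so that a
 * later dbuf_hold()/dbuf_read() will likely find the data in the ARC.
 * This is a no-op if the block has been freed or is already cached.
 */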
1832void
1833dbuf_prefetch(dnode_t *dn, uint64_t blkid, zio_priority_t prio)
1834{
1835	dmu_buf_impl_t *db = NULL;
1836	blkptr_t *bp = NULL;
1837
1838	ASSERT(blkid != DMU_BONUS_BLKID);
1839	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1840
1841	if (dnode_block_freed(dn, blkid))
1842		return;
1843
1844	/* dbuf_find() returns with db_mtx held */
1845	if ((db = dbuf_find(dn, 0, blkid)) != NULL) {
1846		/*
1847		 * This dbuf is already in the cache.  We assume that
1848		 * it is already CACHED, or else about to be either
1849		 * read or filled.
1850		 */
1851		mutex_exit(&db->db_mtx);
1852		return;
1853	}
1854
1855	if (dbuf_findbp(dn, 0, blkid, TRUE, &db, &bp) == 0) {
1856		if (bp && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) {
1857			dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
1858			uint32_t aflags = ARC_NOWAIT | ARC_PREFETCH;
1859			zbookmark_t zb;
1860
1861			SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
1862			    dn->dn_object, 0, blkid);
1863
1864			(void) arc_read(NULL, dn->dn_objset->os_spa,
1865			    bp, NULL, NULL, prio,
1866			    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
1867			    &aflags, &zb);
1868		}
1869		if (db)
1870			dbuf_rele(db, NULL);
1871	}
1872}
1873
1874/*
1875 * Returns with db_holds incremented, and db_mtx not held.
1876 * Note: dn_struct_rwlock must be held.
1877 */
1878int
1879dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
1880    void *tag, dmu_buf_impl_t **dbp)
1881{
1882	dmu_buf_impl_t *db, *parent = NULL;
1883
1884	ASSERT(blkid != DMU_BONUS_BLKID);
1885	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
1886	ASSERT3U(dn->dn_nlevels, >, level);
1887
1888	*dbp = NULL;
1889top:
1890	/* dbuf_find() returns with db_mtx held */
1891	db = dbuf_find(dn, level, blkid);
1892
1893	if (db == NULL) {
1894		blkptr_t *bp = NULL;
1895		int err;
1896
1897		ASSERT3P(parent, ==, NULL);
1898		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
1899		if (fail_sparse) {
1900			if (err == 0 && bp && BP_IS_HOLE(bp))
1901				err = SET_ERROR(ENOENT);
1902			if (err) {
1903				if (parent)
1904					dbuf_rele(parent, NULL);
1905				return (err);
1906			}
1907		}
1908		if (err && err != ENOENT)
1909			return (err);
1910		db = dbuf_create(dn, level, blkid, parent, bp);
1911	}
1912
1913	if (db->db_buf && refcount_is_zero(&db->db_holds)) {
1914		arc_buf_add_ref(db->db_buf, db);
1915		if (db->db_buf->b_data == NULL) {
1916			dbuf_clear(db);
1917			if (parent) {
1918				dbuf_rele(parent, NULL);
1919				parent = NULL;
1920			}
1921			goto top;
1922		}
1923		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
1924	}
1925
1926	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
1927
1928	/*
1929	 * If this buffer is currently syncing out, and we are
1930	 * still referencing it from db_data, we need to make a copy
1931	 * of it in case we decide we want to dirty it again in this txg.
1932	 */
1933	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1934	    dn->dn_object != DMU_META_DNODE_OBJECT &&
1935	    db->db_state == DB_CACHED && db->db_data_pending) {
1936		dbuf_dirty_record_t *dr = db->db_data_pending;
1937
1938		if (dr->dt.dl.dr_data == db->db_buf) {
1939			arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1940
1941			dbuf_set_data(db,
1942			    arc_buf_alloc(dn->dn_objset->os_spa,
1943			    db->db.db_size, db, type));
1944			bcopy(dr->dt.dl.dr_data->b_data, db->db.db_data,
1945			    db->db.db_size);
1946		}
1947	}
1948
1949	(void) refcount_add(&db->db_holds, tag);
1950	dbuf_update_data(db);
1951	DBUF_VERIFY(db);
1952	mutex_exit(&db->db_mtx);
1953
1954	/* NOTE: we can't rele the parent until after we drop the db_mtx */
1955	if (parent)
1956		dbuf_rele(parent, NULL);
1957
1958	ASSERT3P(DB_DNODE(db), ==, dn);
1959	ASSERT3U(db->db_blkid, ==, blkid);
1960	ASSERT3U(db->db_level, ==, level);
1961	*dbp = db;
1962
1963	return (0);
1964}
1965
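/*
 * Convenience wrappers around dbuf_hold_impl() that return the dbuf
 * directly (or NULL on error) rather than an errno.  A typical caller
 * pairs the hold with a dbuf_rele() under the same tag; an illustrative
 * sketch (the caller must hold dn_struct_rwlock, per dbuf_hold_impl()):
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 */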
1966dmu_buf_impl_t *
1967dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
1968{
1969	dmu_buf_impl_t *db;
1970	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, tag, &db);
1971	return (err ? NULL : db);
1972}
1973
1974dmu_buf_impl_t *
1975dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
1976{
1977	dmu_buf_impl_t *db;
1978	int err = dbuf_hold_impl(dn, level, blkid, FALSE, tag, &db);
1979	return (err ? NULL : db);
1980}
1981
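/*
 * Create the bonus dbuf for this dnode.  The bonus dbuf is parented by
 * dn_dbuf and, unlike other dbufs, is never entered in the hash table
 * (see dbuf_create()).
 */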
1982void
1983dbuf_create_bonus(dnode_t *dn)
1984{
1985	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
1986
1987	ASSERT(dn->dn_bonus == NULL);
1988	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
1989}
1990
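/*
 * Set the spill block's size, clamped to
 * [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE] and rounded up to a multiple of
 * SPA_MINBLOCKSIZE.  Fails with ENOTSUP if this is not a spill dbuf.
 */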
1991int
1992dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
1993{
1994	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
1995	dnode_t *dn;
1996
1997	if (db->db_blkid != DMU_SPILL_BLKID)
1998		return (SET_ERROR(ENOTSUP));
1999	if (blksz == 0)
2000		blksz = SPA_MINBLOCKSIZE;
2001	if (blksz > SPA_MAXBLOCKSIZE)
2002		blksz = SPA_MAXBLOCKSIZE;
2003	else
2004		blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
2005
2006	DB_DNODE_ENTER(db);
2007	dn = DB_DNODE(db);
2008	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
2009	dbuf_new_size(db, blksz, tx);
2010	rw_exit(&dn->dn_struct_rwlock);
2011	DB_DNODE_EXIT(db);
2012
2013	return (0);
2014}
2015
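/* Free the dnode's spill block in the given transaction. */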
2016void
2017dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
2018{
2019	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
2020}
2021
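/*
 * Add a hold to an already-held dbuf; asserts that the caller is not
 * racing a final release (the resulting count must exceed one).
 */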
2022#pragma weak dmu_buf_add_ref = dbuf_add_ref
2023void
2024dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
2025{
2026	int64_t holds = refcount_add(&db->db_holds, tag);
2027	ASSERT(holds > 1);
2028}
2029
2030/*
2031 * If you call dbuf_rele() you had better not be referencing the dnode handle
2032 * unless you have some other direct or indirect hold on the dnode. (An indirect
2033 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
2034 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
2035 * dnode's parent dbuf evicting its dnode handles.
2036 */
2037void
2038dbuf_rele(dmu_buf_impl_t *db, void *tag)
2039{
2040	mutex_enter(&db->db_mtx);
2041	dbuf_rele_and_unlock(db, tag);
2042}
2043
2044void
2045dmu_buf_rele(dmu_buf_t *db, void *tag)
2046{
2047	dbuf_rele((dmu_buf_impl_t *)db, tag);
2048}
2049
2050/*
2051 * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
2052 * db_dirtycnt and db_holds to be updated atomically.
2053 */
2054void
2055dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
2056{
2057	int64_t holds;
2058
2059	ASSERT(MUTEX_HELD(&db->db_mtx));
2060	DBUF_VERIFY(db);
2061
2062	/*
2063	 * Remove the reference to the dbuf before removing its hold on the
2064	 * dnode so we can guarantee in dnode_move() that a referenced bonus
2065	 * buffer has a corresponding dnode hold.
2066	 */
2067	holds = refcount_remove(&db->db_holds, tag);
2068	ASSERT(holds >= 0);
2069
2070	/*
2071	 * We can't freeze indirects if there is a possibility that they
2072	 * may be modified in the current syncing context.
2073	 */
2074	if (db->db_buf && holds == (db->db_level == 0 ? db->db_dirtycnt : 0))
2075		arc_buf_freeze(db->db_buf);
2076
2077	if (holds == db->db_dirtycnt &&
2078	    db->db_level == 0 && db->db_immediate_evict)
2079		dbuf_evict_user(db);
2080
2081	if (holds == 0) {
2082		if (db->db_blkid == DMU_BONUS_BLKID) {
2083			mutex_exit(&db->db_mtx);
2084
2085			/*
2086			 * If the dnode moves here, we cannot cross this barrier
2087			 * until the move completes.
2088			 */
2089			DB_DNODE_ENTER(db);
2090			(void) atomic_dec_32_nv(&DB_DNODE(db)->dn_dbufs_count);
2091			DB_DNODE_EXIT(db);
2092			/*
2093			 * The bonus buffer's dnode hold is no longer discounted
2094			 * in dnode_move(). The dnode cannot move until after
2095			 * the dnode_rele().
2096			 */
2097			dnode_rele(DB_DNODE(db), db);
2098		} else if (db->db_buf == NULL) {
2099			/*
2100			 * This is a special case: we never associated this
2101			 * dbuf with any data allocated from the ARC.
2102			 */
2103			ASSERT(db->db_state == DB_UNCACHED ||
2104			    db->db_state == DB_NOFILL);
2105			dbuf_evict(db);
2106		} else if (arc_released(db->db_buf)) {
2107			arc_buf_t *buf = db->db_buf;
2108			/*
2109			 * This dbuf has anonymous data associated with it.
2110			 */
2111			dbuf_set_data(db, NULL);
2112			VERIFY(arc_buf_remove_ref(buf, db));
2113			dbuf_evict(db);
2114		} else {
2115			VERIFY(!arc_buf_remove_ref(db->db_buf, db));
2116
2117			/*
2118			 * A dbuf will be eligible for eviction if either the
2119			 * 'primarycache' property is set or a duplicate
2120			 * copy of this buffer is already cached in the arc.
2121			 *
2122			 * In the case of the 'primarycache' property, a
2123			 * buffer is considered for eviction if it matches
2124			 * the criteria set by that property.
2125			 *
2126			 * To decide if our buffer is considered a
2127			 * duplicate, we must call into the arc to determine
2128			 * if multiple buffers are referencing the same
2129			 * block on-disk. If so, then we simply evict
2130			 * ourselves.
2131			 */
2132			if (!DBUF_IS_CACHEABLE(db) ||
2133			    arc_buf_eviction_needed(db->db_buf))
2134				dbuf_clear(db);
2135			else
2136				mutex_exit(&db->db_mtx);
2137		}
2138	} else {
2139		mutex_exit(&db->db_mtx);
2140	}
2141}
2142
2143#pragma weak dmu_buf_refcount = dbuf_refcount
2144uint64_t
2145dbuf_refcount(dmu_buf_impl_t *db)
2146{
2147	return (refcount_count(&db->db_holds));
2148}
2149
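/*
 * Attach consumer ("user") state to a dbuf, along with a callback that
 * runs when the dbuf is evicted.  dmu_buf_set_user() installs the state
 * only if no user is currently set (returning the existing user pointer
 * otherwise); dmu_buf_update_user() swaps it only if old_user_ptr still
 * matches; the "_ie" variant additionally requests immediate eviction
 * of the user state when the last hold is released.
 *
 * Illustrative sketch of a consumer (my_state_t and my_evict_func() are
 * hypothetical, not part of this file):
 *
 *	static void
 *	my_evict_func(dmu_buf_t *db, void *user_ptr)
 *	{
 *		kmem_free(user_ptr, sizeof (my_state_t));
 *	}
 *	...
 *	if (dmu_buf_set_user(db, st, NULL, my_evict_func) != NULL)
 *		kmem_free(st, sizeof (*st));	(another user beat us)
 */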
2150void *
2151dmu_buf_set_user(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2152    dmu_buf_evict_func_t *evict_func)
2153{
2154	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2155	    user_data_ptr_ptr, evict_func));
2156}
2157
2158void *
2159dmu_buf_set_user_ie(dmu_buf_t *db_fake, void *user_ptr, void *user_data_ptr_ptr,
2160    dmu_buf_evict_func_t *evict_func)
2161{
2162	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2163
2164	db->db_immediate_evict = TRUE;
2165	return (dmu_buf_update_user(db_fake, NULL, user_ptr,
2166	    user_data_ptr_ptr, evict_func));
2167}
2168
2169void *
2170dmu_buf_update_user(dmu_buf_t *db_fake, void *old_user_ptr, void *user_ptr,
2171    void *user_data_ptr_ptr, dmu_buf_evict_func_t *evict_func)
2172{
2173	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2174	ASSERT(db->db_level == 0);
2175
2176	ASSERT((user_ptr == NULL) == (evict_func == NULL));
2177
2178	mutex_enter(&db->db_mtx);
2179
2180	if (db->db_user_ptr == old_user_ptr) {
2181		db->db_user_ptr = user_ptr;
2182		db->db_user_data_ptr_ptr = user_data_ptr_ptr;
2183		db->db_evict_func = evict_func;
2184
2185		dbuf_update_data(db);
2186	} else {
2187		old_user_ptr = db->db_user_ptr;
2188	}
2189
2190	mutex_exit(&db->db_mtx);
2191	return (old_user_ptr);
2192}
2193
2194void *
2195dmu_buf_get_user(dmu_buf_t *db_fake)
2196{
2197	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2198	ASSERT(!refcount_is_zero(&db->db_holds));
2199
2200	return (db->db_user_ptr);
2201}
2202
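/*
 * Returns B_TRUE if freeing this buffer's on-disk block would actually
 * free space, i.e. the block was born after the dataset's most recent
 * snapshot.
 */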
2203boolean_t
2204dmu_buf_freeable(dmu_buf_t *dbuf)
2205{
2206	boolean_t res = B_FALSE;
2207	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2208
2209	if (db->db_blkptr)
2210		res = dsl_dataset_block_freeable(db->db_objset->os_dsl_dataset,
2211		    db->db_blkptr, db->db_blkptr->blk_birth);
2212
2213	return (res);
2214}
2215
2216blkptr_t *
2217dmu_buf_get_blkptr(dmu_buf_t *db)
2218{
2219	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
2220	return (dbi->db_blkptr);
2221}
2222
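/*
 * Make sure db_blkptr points at the right location: the dnode's spill
 * pointer, one of the blkptrs embedded in the dnode, or the proper slot
 * in the parent indirect block.  With epbs = dn_indblkshift -
 * SPA_BLKPTRSHIFT (e.g. 14 - 7 = 7 for 16K indirect blocks, i.e. 128
 * blkptrs each), the parent is block (db_blkid >> epbs) at level
 * (db_level + 1) and our slot within it is
 * (db_blkid & ((1ULL << epbs) - 1)).
 */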
2223static void
2224dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
2225{
2226	/* ASSERT(dmu_tx_is_syncing(tx)) */
2227	ASSERT(MUTEX_HELD(&db->db_mtx));
2228
2229	if (db->db_blkptr != NULL)
2230		return;
2231
2232	if (db->db_blkid == DMU_SPILL_BLKID) {
2233		db->db_blkptr = &dn->dn_phys->dn_spill;
2234		BP_ZERO(db->db_blkptr);
2235		return;
2236	}
2237	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
2238		/*
2239		 * This buffer was allocated at a time when there were
2240		 * no available blkptrs from the dnode, or it was
2241		 * inappropriate to hook it in (i.e., nlevels mismatch).
2242		 */
2243		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
2244		ASSERT(db->db_parent == NULL);
2245		db->db_parent = dn->dn_dbuf;
2246		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
2247		DBUF_VERIFY(db);
2248	} else {
2249		dmu_buf_impl_t *parent = db->db_parent;
2250		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2251
2252		ASSERT(dn->dn_phys->dn_nlevels > 1);
2253		if (parent == NULL) {
2254			mutex_exit(&db->db_mtx);
2255			rw_enter(&dn->dn_struct_rwlock, RW_READER);
2256			(void) dbuf_hold_impl(dn, db->db_level+1,
2257			    db->db_blkid >> epbs, FALSE, db, &parent);
2258			rw_exit(&dn->dn_struct_rwlock);
2259			mutex_enter(&db->db_mtx);
2260			db->db_parent = parent;
2261		}
2262		db->db_blkptr = (blkptr_t *)parent->db.db_data +
2263		    (db->db_blkid & ((1ULL << epbs) - 1));
2264		DBUF_VERIFY(db);
2265	}
2266}
2267
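/*
 * Sync a dirty indirect block: make sure its contents are in memory and
 * db_blkptr points at the right location, issue the write, then sync
 * the child dirty records hanging off this one.
 */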
2268static void
2269dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2270{
2271	dmu_buf_impl_t *db = dr->dr_dbuf;
2272	dnode_t *dn;
2273	zio_t *zio;
2274
2275	ASSERT(dmu_tx_is_syncing(tx));
2276
2277	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2278
2279	mutex_enter(&db->db_mtx);
2280
2281	ASSERT(db->db_level > 0);
2282	DBUF_VERIFY(db);
2283
2284	/* Read the block if it hasn't been read yet. */
2285	if (db->db_buf == NULL) {
2286		mutex_exit(&db->db_mtx);
2287		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
2288		mutex_enter(&db->db_mtx);
2289	}
2290	ASSERT3U(db->db_state, ==, DB_CACHED);
2291	ASSERT(db->db_buf != NULL);
2292
2293	DB_DNODE_ENTER(db);
2294	dn = DB_DNODE(db);
2295	/* Indirect block size must match what the dnode thinks it is. */
2296	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2297	dbuf_check_blkptr(dn, db);
2298	DB_DNODE_EXIT(db);
2299
2300	/* Provide the pending dirty record to child dbufs */
2301	db->db_data_pending = dr;
2302
2303	mutex_exit(&db->db_mtx);
2304	dbuf_write(dr, db->db_buf, tx);
2305
2306	zio = dr->dr_zio;
2307	mutex_enter(&dr->dt.di.dr_mtx);
2308	dbuf_sync_list(&dr->dt.di.dr_children, tx);
2309	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2310	mutex_exit(&dr->dt.di.dr_mtx);
2311	zio_nowait(zio);
2312}
2313
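/*
 * Sync a dirty level-0 block.  Bonus buffers are copied directly into
 * the dnode phys; everything else goes to dbuf_write(), possibly after
 * copying the data so that open-context modifications cannot leak into
 * this txg's write.
 */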
2314static void
2315dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
2316{
2317	arc_buf_t **datap = &dr->dt.dl.dr_data;
2318	dmu_buf_impl_t *db = dr->dr_dbuf;
2319	dnode_t *dn;
2320	objset_t *os;
2321	uint64_t txg = tx->tx_txg;
2322
2323	ASSERT(dmu_tx_is_syncing(tx));
2324
2325	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
2326
2327	mutex_enter(&db->db_mtx);
2328	/*
2329	 * To be synced, we must be dirtied.  But we
2330	 * might have been freed after being dirtied.
2331	 */
2332	if (db->db_state == DB_UNCACHED) {
2333		/* This buffer has been freed since it was dirtied */
2334		ASSERT(db->db.db_data == NULL);
2335	} else if (db->db_state == DB_FILL) {
2336		/* This buffer was freed and is now being re-filled */
2337		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
2338	} else {
2339		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
2340	}
2341	DBUF_VERIFY(db);
2342
2343	DB_DNODE_ENTER(db);
2344	dn = DB_DNODE(db);
2345
2346	if (db->db_blkid == DMU_SPILL_BLKID) {
2347		mutex_enter(&dn->dn_mtx);
2348		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
2349		mutex_exit(&dn->dn_mtx);
2350	}
2351
2352	/*
2353	 * If this is a bonus buffer, simply copy the bonus data into the
2354	 * dnode.  It will be written out when the dnode is synced (and it
2355	 * will be synced, since it must have been dirty for dbuf_sync to
2356	 * be called).
2357	 */
2358	if (db->db_blkid == DMU_BONUS_BLKID) {
2359		dbuf_dirty_record_t **drp;
2360
2361		ASSERT(*datap != NULL);
2362		ASSERT0(db->db_level);
2363		ASSERT3U(dn->dn_phys->dn_bonuslen, <=, DN_MAX_BONUSLEN);
2364		bcopy(*datap, DN_BONUS(dn->dn_phys), dn->dn_phys->dn_bonuslen);
2365		DB_DNODE_EXIT(db);
2366
2367		if (*datap != db->db.db_data) {
2368			zio_buf_free(*datap, DN_MAX_BONUSLEN);
2369			arc_space_return(DN_MAX_BONUSLEN, ARC_SPACE_OTHER);
2370		}
2371		db->db_data_pending = NULL;
2372		drp = &db->db_last_dirty;
2373		while (*drp != dr)
2374			drp = &(*drp)->dr_next;
2375		ASSERT(dr->dr_next == NULL);
2376		ASSERT(dr->dr_dbuf == db);
2377		*drp = dr->dr_next;
2378		if (dr->dr_dbuf->db_level != 0) {
2379			list_destroy(&dr->dt.di.dr_children);
2380			mutex_destroy(&dr->dt.di.dr_mtx);
2381		}
2382		kmem_free(dr, sizeof (dbuf_dirty_record_t));
2383		ASSERT(db->db_dirtycnt > 0);
2384		db->db_dirtycnt -= 1;
2385		dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
2386		return;
2387	}
2388
2389	os = dn->dn_objset;
2390
2391	/*
2392	 * This function may have dropped the db_mtx lock allowing a dmu_sync
2393	 * operation to sneak in. As a result, we need to ensure that we
2394	 * don't check the dr_override_state until we have returned from
2395	 * dbuf_check_blkptr.
2396	 */
2397	dbuf_check_blkptr(dn, db);
2398
2399	/*
2400	 * If this buffer is in the middle of an immediate write,
2401	 * wait for the synchronous IO to complete.
2402	 */
2403	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
2404		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
2405		cv_wait(&db->db_changed, &db->db_mtx);
2406		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
2407	}
2408
2409	if (db->db_state != DB_NOFILL &&
2410	    dn->dn_object != DMU_META_DNODE_OBJECT &&
2411	    refcount_count(&db->db_holds) > 1 &&
2412	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
2413	    *datap == db->db_buf) {
2414		/*
2415		 * If this buffer is currently "in use" (i.e., there
2416		 * are active holds and db_data still references it),
2417		 * then make a copy before we start the write so that
2418		 * any modifications from the open txg will not leak
2419		 * into this write.
2420		 *
2421		 * NOTE: this copy does not need to be made for
2422		 * objects only modified in the syncing context (e.g.
2423		 * DMU_OT_DNODE blocks).
2424		 */
2425		int blksz = arc_buf_size(*datap);
2426		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2427		*datap = arc_buf_alloc(os->os_spa, blksz, db, type);
2428		bcopy(db->db.db_data, (*datap)->b_data, blksz);
2429	}
2430	db->db_data_pending = dr;
2431
2432	mutex_exit(&db->db_mtx);
2433
2434	dbuf_write(dr, *datap, tx);
2435
2436	ASSERT(!list_link_active(&dr->dr_dirty_node));
2437	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
2438		list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
2439		DB_DNODE_EXIT(db);
2440	} else {
2441		/*
2442		 * Although zio_nowait() does not "wait for an IO", it does
2443		 * initiate the IO. If this is an empty write it seems plausible
2444		 * that the IO could actually be completed before the nowait
2445		 * returns. We need to DB_DNODE_EXIT() first in case
2446		 * zio_nowait() invalidates the dbuf.
2447		 */
2448		DB_DNODE_EXIT(db);
2449		zio_nowait(dr->dr_zio);
2450	}
2451}
2452
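/*
 * Sync every dirty record on the list, stopping early if we reach a
 * record whose zio has already been initiated (which only happens for
 * the meta-dnode; see the comment below).
 */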
2453void
2454dbuf_sync_list(list_t *list, dmu_tx_t *tx)
2455{
2456	dbuf_dirty_record_t *dr;
2457
2458	while ((dr = list_head(list)) != NULL) {
2459		if (dr->dr_zio != NULL) {
2460			/*
2461			 * If we find an already initialized zio then we
2462			 * are processing the meta-dnode, and we have finished.
2463			 * The dbufs for all dnodes are put back on the list
2464			 * during processing, so that we can zio_wait()
2465			 * these IOs after initiating all child IOs.
2466			 */
2467			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
2468			    DMU_META_DNODE_OBJECT);
2469			break;
2470		}
2471		list_remove(list, dr);
2472		if (dr->dr_dbuf->db_level > 0)
2473			dbuf_sync_indirect(dr, tx);
2474		else
2475			dbuf_sync_leaf(dr, tx);
2476	}
2477}
2478
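/*
 * The "ready" callback for a dbuf write zio: charge the space delta to
 * the dnode, bump dn_maxblkid if needed, and recompute the block
 * pointer's fill count (the number of non-hole dnodes or blocks that
 * this block points to, directly or indirectly).
 */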
2479/* ARGSUSED */
2480static void
2481dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
2482{
2483	dmu_buf_impl_t *db = vdb;
2484	dnode_t *dn;
2485	blkptr_t *bp = zio->io_bp;
2486	blkptr_t *bp_orig = &zio->io_bp_orig;
2487	spa_t *spa = zio->io_spa;
2488	int64_t delta;
2489	uint64_t fill = 0;
2490	int i;
2491
2492	ASSERT3P(db->db_blkptr, ==, bp);
2493
2494	DB_DNODE_ENTER(db);
2495	dn = DB_DNODE(db);
2496	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
2497	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
2498	zio->io_prev_space_delta = delta;
2499
2500	if (bp->blk_birth != 0) {
2501		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
2502		    BP_GET_TYPE(bp) == dn->dn_type) ||
2503		    (db->db_blkid == DMU_SPILL_BLKID &&
2504		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
2505		    BP_IS_EMBEDDED(bp));
2506		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
2507	}
2508
2509	mutex_enter(&db->db_mtx);
2510
2511#ifdef ZFS_DEBUG
2512	if (db->db_blkid == DMU_SPILL_BLKID) {
2513		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2514		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2515		    db->db_blkptr == &dn->dn_phys->dn_spill);
2516	}
2517#endif
2518
2519	if (db->db_level == 0) {
2520		mutex_enter(&dn->dn_mtx);
2521		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
2522		    db->db_blkid != DMU_SPILL_BLKID)
2523			dn->dn_phys->dn_maxblkid = db->db_blkid;
2524		mutex_exit(&dn->dn_mtx);
2525
2526		if (dn->dn_type == DMU_OT_DNODE) {
2527			dnode_phys_t *dnp = db->db.db_data;
2528			for (i = db->db.db_size >> DNODE_SHIFT; i > 0;
2529			    i--, dnp++) {
2530				if (dnp->dn_type != DMU_OT_NONE)
2531					fill++;
2532			}
2533		} else {
2534			if (BP_IS_HOLE(bp)) {
2535				fill = 0;
2536			} else {
2537				fill = 1;
2538			}
2539		}
2540	} else {
2541		blkptr_t *ibp = db->db.db_data;
2542		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
2543		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
2544			if (BP_IS_HOLE(ibp))
2545				continue;
2546			fill += BP_GET_FILL(ibp);
2547		}
2548	}
2549	DB_DNODE_EXIT(db);
2550
2551	if (!BP_IS_EMBEDDED(bp))
2552		bp->blk_fill = fill;
2553
2554	mutex_exit(&db->db_mtx);
2555}
2556
2557/*
2558 * The SPA will call this callback several times for each zio - once
2559 * for every physical child i/o (zio->io_phys_children times).  This
2560 * allows the DMU to monitor the progress of each logical i/o.  For example,
2561 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
2562 * block.  There may be a long delay before all copies/fragments are completed,
2563 * so this callback allows us to retire dirty space gradually, as the physical
2564 * i/os complete.
2565 */
2566/* ARGSUSED */
2567static void
2568dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
2569{
2570	dmu_buf_impl_t *db = arg;
2571	objset_t *os = db->db_objset;
2572	dsl_pool_t *dp = dmu_objset_pool(os);
2573	dbuf_dirty_record_t *dr;
2574	int delta = 0;
2575
2576	dr = db->db_data_pending;
2577	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
2578
2579	/*
2580	 * The callback will be called io_phys_children times.  Retire one
2581	 * portion of our dirty space each time we are called.  Any rounding
2582	 * error will be cleaned up by dsl_pool_sync()'s call to
2583	 * dsl_pool_undirty_space().
2584	 */
2585	delta = dr->dr_accounted / zio->io_phys_children;
2586	dsl_pool_undirty_space(dp, delta, zio->io_txg);
2587}
2588
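/*
 * The "done" callback for a dbuf write zio: update the dataset's block
 * accounting, unlink and free the dirty record, and drop the hold that
 * dbuf_dirty() took on the dbuf for this txg.
 */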
2589/* ARGSUSED */
2590static void
2591dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
2592{
2593	dmu_buf_impl_t *db = vdb;
2594	blkptr_t *bp_orig = &zio->io_bp_orig;
2595	blkptr_t *bp = db->db_blkptr;
2596	objset_t *os = db->db_objset;
2597	dmu_tx_t *tx = os->os_synctx;
2598	dbuf_dirty_record_t **drp, *dr;
2599
2600	ASSERT0(zio->io_error);
2601	ASSERT(db->db_blkptr == bp);
2602
2603	/*
2604	 * For nopwrites and rewrites we ensure that the bp matches our
2605	 * original and bypass all the accounting.
2606	 */
2607	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
2608		ASSERT(BP_EQUAL(bp, bp_orig));
2609	} else {
2610		dsl_dataset_t *ds = os->os_dsl_dataset;
2611		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
2612		dsl_dataset_block_born(ds, bp, tx);
2613	}
2614
2615	mutex_enter(&db->db_mtx);
2616
2617	DBUF_VERIFY(db);
2618
2619	drp = &db->db_last_dirty;
2620	while ((dr = *drp) != db->db_data_pending)
2621		drp = &dr->dr_next;
2622	ASSERT(!list_link_active(&dr->dr_dirty_node));
2623	ASSERT(dr->dr_dbuf == db);
2624	ASSERT(dr->dr_next == NULL);
2625	*drp = dr->dr_next;
2626
2627#ifdef ZFS_DEBUG
2628	if (db->db_blkid == DMU_SPILL_BLKID) {
2629		dnode_t *dn;
2630
2631		DB_DNODE_ENTER(db);
2632		dn = DB_DNODE(db);
2633		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
2634		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
2635		    db->db_blkptr == &dn->dn_phys->dn_spill);
2636		DB_DNODE_EXIT(db);
2637	}
2638#endif
2639
2640	if (db->db_level == 0) {
2641		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2642		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
2643		if (db->db_state != DB_NOFILL) {
2644			if (dr->dt.dl.dr_data != db->db_buf)
2645				VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data,
2646				    db));
2647			else if (!arc_released(db->db_buf))
2648				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2649		}
2650	} else {
2651		dnode_t *dn;
2652
2653		DB_DNODE_ENTER(db);
2654		dn = DB_DNODE(db);
2655		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
2656		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
2657		if (!BP_IS_HOLE(db->db_blkptr)) {
2658			int epbs =
2659			    dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
2660			ASSERT3U(db->db_blkid, <=,
2661			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
2662			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
2663			    db->db.db_size);
2664			if (!arc_released(db->db_buf))
2665				arc_set_callback(db->db_buf, dbuf_do_evict, db);
2666		}
2667		DB_DNODE_EXIT(db);
2668		mutex_destroy(&dr->dt.di.dr_mtx);
2669		list_destroy(&dr->dt.di.dr_children);
2670	}
2671	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2672
2673	cv_broadcast(&db->db_changed);
2674	ASSERT(db->db_dirtycnt > 0);
2675	db->db_dirtycnt -= 1;
2676	db->db_data_pending = NULL;
2677	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
2678}
2679
2680static void
2681dbuf_write_nofill_ready(zio_t *zio)
2682{
2683	dbuf_write_ready(zio, NULL, zio->io_private);
2684}
2685
2686static void
2687dbuf_write_nofill_done(zio_t *zio)
2688{
2689	dbuf_write_done(zio, NULL, zio->io_private);
2690}
2691
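/*
 * Ready/done callbacks for an "override" write, where the block pointer
 * was produced in open context (by dmu_sync() or an embedded-data
 * write).  If the block that was actually written differs from the
 * override bp, the stale override bp is freed and the ARC buffer is
 * released.
 */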
2692static void
2693dbuf_write_override_ready(zio_t *zio)
2694{
2695	dbuf_dirty_record_t *dr = zio->io_private;
2696	dmu_buf_impl_t *db = dr->dr_dbuf;
2697
2698	dbuf_write_ready(zio, NULL, db);
2699}
2700
2701static void
2702dbuf_write_override_done(zio_t *zio)
2703{
2704	dbuf_dirty_record_t *dr = zio->io_private;
2705	dmu_buf_impl_t *db = dr->dr_dbuf;
2706	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
2707
2708	mutex_enter(&db->db_mtx);
2709	if (!BP_EQUAL(zio->io_bp, obp)) {
2710		if (!BP_IS_HOLE(obp))
2711			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
2712		arc_release(dr->dt.dl.dr_data, db);
2713	}
2714	mutex_exit(&db->db_mtx);
2715
2716	dbuf_write_done(zio, NULL, db);
2717}
2718
2719/* Issue I/O to commit a dirty buffer to disk. */
2720static void
2721dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
2722{
2723	dmu_buf_impl_t *db = dr->dr_dbuf;
2724	dnode_t *dn;
2725	objset_t *os;
2726	dmu_buf_impl_t *parent = db->db_parent;
2727	uint64_t txg = tx->tx_txg;
2728	zbookmark_t zb;
2729	zio_prop_t zp;
2730	zio_t *zio;
2731	int wp_flag = 0;
2732
2733	DB_DNODE_ENTER(db);
2734	dn = DB_DNODE(db);
2735	os = dn->dn_objset;
2736
2737	if (db->db_state != DB_NOFILL) {
2738		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
2739			/*
2740			 * Private object buffers are released here rather
2741			 * than in dbuf_dirty() since they are only modified
2742			 * in the syncing context and we don't want the
2743			 * overhead of making multiple copies of the data.
2744			 */
2745			if (BP_IS_HOLE(db->db_blkptr)) {
2746				arc_buf_thaw(data);
2747			} else {
2748				dbuf_release_bp(db);
2749			}
2750		}
2751	}
2752
2753	if (parent != dn->dn_dbuf) {
2754		/* Our parent is an indirect block. */
2755		/* We have a dirty parent that has been scheduled for write. */
2756		ASSERT(parent && parent->db_data_pending);
2757		/* Our parent's buffer is one level closer to the dnode. */
2758		ASSERT(db->db_level == parent->db_level-1);
2759		/*
2760		 * We're about to modify our parent's db_data by modifying
2761		 * our block pointer, so the parent must be released.
2762		 */
2763		ASSERT(arc_released(parent->db_buf));
2764		zio = parent->db_data_pending->dr_zio;
2765	} else {
2766		/* Our parent is the dnode itself. */
2767		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
2768		    db->db_blkid != DMU_SPILL_BLKID) ||
2769		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
2770		if (db->db_blkid != DMU_SPILL_BLKID)
2771			ASSERT3P(db->db_blkptr, ==,
2772			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
2773		zio = dn->dn_zio;
2774	}
2775
2776	ASSERT(db->db_level == 0 || data == db->db_buf);
2777	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
2778	ASSERT(zio);
2779
2780	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
2781	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
2782	    db->db.db_object, db->db_level, db->db_blkid);
2783
2784	if (db->db_blkid == DMU_SPILL_BLKID)
2785		wp_flag = WP_SPILL;
2786	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
2787
2788	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
2789	DB_DNODE_EXIT(db);
2790
2791	if (db->db_level == 0 &&
2792	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
2793		/*
2794		 * The BP for this block has been provided by open context
2795		 * (by dmu_sync() or dmu_buf_write_embedded()).
2796		 */
2797		void *contents = (data != NULL) ? data->b_data : NULL;
2798
2799		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2800		    db->db_blkptr, contents, db->db.db_size, &zp,
2801		    dbuf_write_override_ready, NULL, dbuf_write_override_done,
2802		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2803		mutex_enter(&db->db_mtx);
2804		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
2805		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
2806		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
2807		mutex_exit(&db->db_mtx);
2808	} else if (db->db_state == DB_NOFILL) {
2809		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
2810		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
2811		dr->dr_zio = zio_write(zio, os->os_spa, txg,
2812		    db->db_blkptr, NULL, db->db.db_size, &zp,
2813		    dbuf_write_nofill_ready, NULL, dbuf_write_nofill_done, db,
2814		    ZIO_PRIORITY_ASYNC_WRITE,
2815		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
2816	} else {
2817		ASSERT(arc_released(data));
2818		dr->dr_zio = arc_write(zio, os->os_spa, txg,
2819		    db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db),
2820		    DBUF_IS_L2COMPRESSIBLE(db), &zp, dbuf_write_ready,
2821		    dbuf_write_physdone, dbuf_write_done, db,
2822		    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
2823	}
2824}
2825