dmu_tx.c revision 276081
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h> /* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h> /* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h> /* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}
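	/*
	 * Worked example (illustrative, not in the original source): with
	 * min_bs = max_bs = 17 (128K blocks) and min_ibs = max_ibs = 14
	 * (16K indirects, so epbs = 7), the loop above runs for bits =
	 * 47, 40, ..., 5 and charges at least one 16K indirect block per
	 * iteration, i.e. seven levels, or 112K of worst-case indirect
	 * overhead for a write that spans a single block at every level.
	 */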

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;    /* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVELS, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}
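	/*
	 * Worked example (illustrative, not in the original source): with
	 * 128K data blocks (datablkshift = 17) and 16K indirects
	 * (indblkshift = 14, so epbs = 7), the calculation above gives
	 * maxlevel = 2 + (64 - 17) / 7 = 8, far below the worst case
	 * reached with the smallest data and indirect block sizes.
	 */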

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;


	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly. The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}
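
/*
 * Worked example (illustrative, not in the original source): assuming
 * zfs_dirty_data_max = 4GB, zfs_delay_min_dirty_percent = 60 and
 * zfs_delay_scale = 500,000, a transaction arriving when dirty = 3GB
 * sees delay_min_bytes = 2.4GB, so
 *
 *	min_tx_time = 500,000 * (3G - 2.4G) / (4G - 3G) ~= 300,000ns
 *
 * i.e. a 300us floor on this transaction, subject to the
 * zfs_delay_max_ns cap and the dp_last_wakeup chaining above.
 */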

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
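
/*
 * Illustrative sketch (not part of the original source): the canonical
 * TXG_NOWAIT retry loop described in the comment above.  The function
 * name and the 4K write are hypothetical; the DMU calls are the ones
 * defined in this file.
 */
static int
dmu_tx_assign_example(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

top:
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, object, 0, 4096);
	err = dmu_tx_assign(tx, TXG_NOWAIT);
	if (err != 0) {
		if (err == ERESTART) {
			/* Drop any locks here, then wait and retry. */
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		dmu_tx_abort(tx);
		return (err);
	}
	/* ... dirty the object here, e.g. via dmu_buf_will_dirty() ... */
	dmu_tx_commit(tx);
	return (0);
}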

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}


void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
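
/*
 * Illustrative sketch (not part of the original source): a commit
 * callback and its registration.  The callback receives the 'data'
 * pointer passed to dmu_tx_callback_register() plus an error code,
 * e.g. ECANCELED from dmu_tx_abort() above if the transaction never
 * commits.  The names here are hypothetical.
 */
static void
example_commit_cb(void *arg, int error)
{
	/* 'error' is 0 on commit, nonzero (e.g. ECANCELED) otherwise. */
	kmem_free(arg, sizeof (uint64_t));
}

/*
 * Typical registration, between dmu_tx_assign() and dmu_tx_commit():
 *
 *	dmu_tx_callback_register(tx, example_commit_cb, arg);
 */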

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set if any variable-sized attribute on the object
 * may grow as a result of this transaction, since growth may force the
 * attribute data to move to a spill block.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}