dmu_tx.c revision 297112
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 Integros [integros.com]
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks; /* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;


	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block  (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
	/*	return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                            * +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->     *****            +
 *       |                                         ****                 |
 *       +                                      ****                    +
 * 100us +                                    **                        +
 *       +                                   *                          +
 *       |                                  *                           |
 *       +                                 *                            +
 *  10us +                                *                             +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
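
/*
 * Illustrative sketch (not taken verbatim from any caller) of the
 * TXG_NOWAIT retry loop described above; "os", "object", "off", "len"
 * and the "top" label are caller-supplied placeholders.
 *
 *	top:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err != 0) {
 *			(drop any locks taken before the assign)
 *			if (err == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		(dirty the held range, e.g. via dmu_write())
 *		dmu_tx_commit(tx);
 */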

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}


void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
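
/*
 * Illustrative sketch of registering a commit callback on an assigned tx.
 * The callback is invoked once the tx's txg has been synced, or with
 * ECANCELED if the tx is aborted (see dmu_tx_abort() above); "my_done" and
 * "my_arg" are caller-defined placeholders.
 *
 *	static void
 *	my_done(void *arg, int error)
 *	{
 *		(error is 0 on commit, nonzero (e.g. ECANCELED) otherwise)
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done, my_arg);
 */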

/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size)
 *
 * variable_size is the total size of all variable sized attributes
 * passed to this function.  It is not the total size of all
 * variable size attributes that *may* exist on this object.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}