dmu_tx.c revision 288571
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);

dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}
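/*
 * A transaction moves through this file in a fixed sequence: create,
 * declare holds, assign to a txg, perform the writes, commit.  A minimal
 * consumer using the blocking TXG_WAIT mode might look like the
 * hypothetical sketch below ("os", "object", "off", "len" and "buf" are
 * assumed to be supplied by the caller; this is illustrative only):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */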
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = highbit64(txh->txh_tx->tx_objset->os_recordsize) - 1;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		} else {
			/*
			 * The blocksize can increase up to the recordsize,
			 * or if it is already more than the recordsize,
			 * up to the next power of 2.
			 */
			min_bs = highbit64(dn->dn_datablksz - 1);
			max_bs = MAX(max_bs, highbit64(dn->dn_datablksz - 1));
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start,
			    FALSE, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}
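/*
 * To get a concrete feel for the estimate above, consider a hypothetical
 * 50,000-byte write at offset 100,000 into a new object on a dataset with
 * recordsize=128K (so max_bs = 17).  Then start = P2ALIGN(100000, 128K) = 0
 * and end = P2ROUNDUP(150000, 128K) - 1 = 262143, so the data-block portion
 * of txh_space_towrite is charged 256K: the write straddles a 128K block
 * boundary, and both blocks may be rewritten in full.  The per-level loop
 * that follows then adds indirect-block overhead on top of that.  The
 * numbers are illustrative only.
 */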
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
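/*
 * Because a single write hold must stay under DMU_MAX_ACCESS (asserted
 * above), callers writing a large region are expected to loop, declaring
 * and committing one bounded transaction per chunk.  A hypothetical
 * sketch, with "chunk" chosen well below DMU_MAX_ACCESS and the buffer
 * bookkeeping elided:
 *
 *	while (resid > 0) {
 *		int n = MIN(resid, chunk);
 *		dmu_tx_t *tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, n);
 *		error = dmu_tx_assign(tx, TXG_WAIT);
 *		if (error != 0) {
 *			dmu_tx_abort(tx);
 *			break;
 *		}
 *		dmu_write(os, object, off, n, buf, tx);
 *		dmu_tx_commit(tx);
 *		off += n;
 *		resid -= n;
 *	}
 */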
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;
	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs,
		    FALSE, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVEL, but calculate it with the
		 * given datablkshift and indblkshift.  This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

/*
 * This function marks the transaction as being a "net free".  The end
 * result is that refquotas will be disabled for this transaction, and
 * this transaction will be able to use half of the pool space overhead
 * (see dsl_pool_adjustedsize()).  Therefore this function should only
 * be called for transactions that we expect will not cause a net increase
 * in the amount of space used (but it's OK if that is occasionally not true).
 */
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_FREE, 0, 0);

	/*
	 * Pretend that this operation will free 1GB of space.  This
	 * should be large enough to cancel out the largest write.
	 * We don't want to use something like UINT64_MAX, because that would
	 * cause overflows when doing math with these values (e.g. in
	 * dmu_tx_try_assign()).
	 */
	txh->txh_space_tofree = txh->txh_space_tounref = 1024 * 1024 * 1024;
}
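/*
 * dmu_tx_mark_netfree() is typically paired with a free-type hold such as
 * dmu_tx_hold_free() below.  A hypothetical caller truncating away all of
 * an object's data, which should succeed even when the dataset is at its
 * refquota, might do something like:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 *	dmu_tx_mark_netfree(tx);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	error = dmu_free_range(os, object, 0, DMU_OBJECT_END, tx);
 *	dmu_tx_commit(tx);
 */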
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i > end)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	dsl_dataset_phys_t *ds_phys;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += MZAP_MAX_BLKSZ;
		else
			txh->txh_space_towrite += MZAP_MAX_BLKSZ;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += MZAP_MAX_BLKSZ;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	ds_phys = dsl_dataset_phys(dn->dn_objset->os_dsl_dataset);
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
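/*
 * A hypothetical caller inserting a directory-style entry would declare
 * the ZAP hold first and perform the update only after the transaction is
 * assigned ("zapobj", "name" and "value" are placeholders; value is a
 * uint64_t here, matching the 8-byte/1-integer arguments to zap_add()):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error == 0) {
 *		error = zap_add(os, zapobj, name, 8, 1, &value, tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */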
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}
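/*
 * Bonus-buffer updates follow the same declare-then-dirty pattern as the
 * other holds.  A hypothetical sketch using dmu_tx_hold_bonus() above
 * ("object" is assumed to be supplied by the caller):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, object);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		dmu_buf_will_dirty(db, tx);
 *		... modify db->db_data ...
 *		dmu_buf_rele(db, FTAG);
 *	}
 *	dmu_tx_commit(tx);
 */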
int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
	/*	return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent.  This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate.  The scale of the curve is defined by zfs_delay_scale.  Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                      *                       +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}
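/*
 * For a concrete sense of scale, assuming the default tunables
 * (zfs_delay_scale of 500,000ns and zfs_delay_min_dirty_percent of 60):
 * with zfs_dirty_data_max at 4GB, delay_min_bytes is 2.4GB.  At 3.2GB of
 * dirty data, (dirty - min) and (max - dirty) are both 0.8GB, so
 * min_tx_time = 500,000ns = 500us -- the midpoint of the curves above,
 * or roughly 2000 writes per second.
 */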
static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
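/*
 * The TXG_NOWAIT contract described above leads to the canonical retry
 * loop; the "top:" label and the lock management are the (hypothetical)
 * caller's own:
 *
 * top:
 *	... acquire locks ...
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error != 0) {
 *		... drop locks ...
 *		if (error == ERESTART) {
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	... do the writes, dmu_tx_commit(tx), drop locks ...
 */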
void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}
void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func,
    void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
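/*
 * Commit callbacks let a consumer learn when its transaction's txg has
 * made it to stable storage (error == 0) or was abandoned (nonzero error;
 * dmu_tx_abort() above delivers ECANCELED).  A hypothetical registration,
 * where "my_commit_cb" and "my_data" are the caller's own; the callback
 * must be registered before dmu_tx_commit():
 *
 *	static void
 *	my_commit_cb(void *data, int error)
 *	{
 *		if (error == 0)
 *			... the change is on stable storage ...
 *		else
 *			... the transaction did not commit ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_data);
 */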
/*
 * Interface to hold a bunch of attributes.
 * Used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * It should be a very rare case where this is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}

void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_OLD_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_OLD_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_OLD_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}
/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the update may increase the total size of the
 * object's attributes, in which case the layout ZAP and the spill block
 * must also be held.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
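/*
 * A hypothetical system-attribute update ties the pieces above together;
 * "sa_attr" is a placeholder for an attribute id previously registered
 * with the SA layer, and "value" is a fixed-size attribute value:
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error == 0) {
 *		error = sa_update(hdl, sa_attr, &value, sizeof (value), tx);
 *		dmu_tx_commit(tx);
 *	} else {
 *		dmu_tx_abort(tx);
 *	}
 */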