dmu_tx.c revision 260763
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
	tx->tx_start = gethrtime();
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, they can manipulate any object anyhow, and
	 * the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (SET_ERROR(EIO));
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1 blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		min_ibs = max_ibs = dn->dn_indblkshift;
		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = SET_ERROR(EFBIG);

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}
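
/*
 * A minimal usage sketch (object, off, len, and buf are illustrative):
 * the hold is taken before assignment and must cover every range that
 * will be dirtied in this tx; under ZFS_DEBUG, dmu_tx_dirty_buf() below
 * panics if a dbuf is dirtied without a matching hold.
 *
 *	tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */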
static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;
	uint64_t l0span = 0, nl1blks = 0;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid > dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid + 1;

	}
	l0span = nblks;	/* save for later use to calc level > 1 overhead */
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nl1blks = 1;
		nblks = 0;
	}

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(bp);
		}
		dbuf_rele(dbuf, FTAG);

		++nl1blks;
		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels and a
	 * worst-possible distribution of l1-blocks over the region to free.
	 */
	{
		uint64_t blkcnt = 1 + ((l0span >> epbs) >> epbs);
		int level = 2;
		/*
		 * Here we don't use DN_MAX_LEVELS, but calculate it with the
		 * given datablkshift and indblkshift. This makes the
		 * difference between 19 and 8 on large files.
		 */
		int maxlevel = 2 + (DN_MAX_OFFSET_SHIFT - dn->dn_datablkshift) /
		    (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		while (level++ < maxlevel) {
			txh->txh_memory_tohold += MAX(MIN(blkcnt, nl1blks), 1)
			    << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
	}

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	int err;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;
	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;


	/*
	 * For i/o error checking, we read the first and last level-0
	 * blocks if they are not aligned, and all the level-1 blocks.
	 *
	 * Note:  dbuf_free_range() assumes that we have not instantiated
	 * any level-0 dbufs that will be completely freed.  Therefore we must
	 * exercise care to not read or count the first and last blocks
	 * if they are blocksize-aligned.
	 */
	if (dn->dn_datablkshift == 0) {
		if (off != 0 || len < dn->dn_datablksz)
			dmu_tx_count_write(txh, 0, dn->dn_datablksz);
	} else {
		/* first block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off, 1);
		/* last block will be modified if it is not aligned */
		if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
			dmu_tx_count_write(txh, off+len, 1);
	}

	/*
	 * Check level-1 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		int shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		uint64_t start = off >> shift;
		uint64_t end = (off + len) >> shift;

		ASSERT(dn->dn_indblkshift != 0);

		/*
		 * dnode_reallocate() can result in an object with indirect
		 * blocks having an odd data block size.  In this case,
		 * just check the single block.
		 */
		if (dn->dn_datablkshift == 0)
			start = end = 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (uint64_t i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
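
/*
 * A sketch of a typical dmu_tx_hold_zap() call, assuming the caller is
 * about to add the entry "name" to an existing ZAP object zapobj (e.g.
 * a directory).  Passing add == B_TRUE plus the name lets the estimate
 * above account for leaf growth and a possible micro- to fat-zap upgrade:
 *
 *	dmu_tx_hold_zap(tx, zapobj, B_TRUE, name);
 *
 * Pass add == B_FALSE when only removing entries, and name == NULL when
 * the affected names are not known in advance (worst-case estimate).
 */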
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

/*
 * If we can't do 10 iops, something is wrong.  Let us go ahead
 * and hit zfs_dirty_data_max.
 */
hrtime_t zfs_delay_max_ns = MSEC2NSEC(100);
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */

/*
 * We delay transactions when we've determined that the backend storage
 * isn't able to accommodate the rate of incoming writes.
 *
 * If there is already a transaction waiting, we delay relative to when
 * that transaction finishes waiting.  This way the calculated min_time
 * is independent of the number of threads concurrently executing
 * transactions.
 *
 * If we are the only waiter, wait relative to when the transaction
 * started, rather than the current time.  This credits the transaction for
 * "time already served", e.g. reading indirect blocks.
 *
 * The minimum time for a transaction to take is calculated as:
 *     min_time = scale * (dirty - min) / (max - dirty)
 *     min_time is then capped at zfs_delay_max_ns.
 *
 * The delay has two degrees of freedom that can be adjusted via tunables.
 * The percentage of dirty data at which we start to delay is defined by
 * zfs_delay_min_dirty_percent. This should typically be at or above
 * zfs_vdev_async_write_active_max_dirty_percent so that we only start to
 * delay after writing at full speed has failed to keep up with the incoming
 * write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
 * speaking, this variable determines the amount of delay at the midpoint of
 * the curve.
 *
 * delay
 *  10ms +-------------------------------------------------------------*+
 *       |                                                             *|
 *   9ms +                                                             *+
 *       |                                                             *|
 *   8ms +                                                             *+
 *       |                                                            * |
 *   7ms +                                                            * +
 *       |                                                            * |
 *   6ms +                                                            * +
 *       |                                                            * |
 *   5ms +                                                           *  +
 *       |                                                           *  |
 *   4ms +                                                           *  +
 *       |                                                           *  |
 *   3ms +                                                          *   +
 *       |                                                          *   |
 *   2ms +                                              (midpoint) *    +
 *       |                                                  |    **     |
 *   1ms +                                                  v ***       +
 *       |             zfs_delay_scale ---------->     ********         |
 *     0 +-------------------------------------*********----------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note that since the delay is added to the outstanding time remaining on the
 * most recent transaction, the delay is effectively the inverse of IOPS.
 * Here the midpoint of 500us translates to 2000 IOPS.  The shape of the curve
 * was chosen such that small changes in the amount of accumulated dirty data
 * in the first 3/4 of the curve yield relatively small differences in the
 * amount of delay.
 *
 * The effects can be easier to understand when the amount of delay is
 * represented on a log scale:
 *
 * delay
 * 100ms +-------------------------------------------------------------++
 *       +                                                              +
 *       |                                                              |
 *       +                                                             *+
 *  10ms +                                                             *+
 *       +                                                           ** +
 *       |                                              (midpoint)  **  |
 *       +                                                  |     **    +
 *   1ms +                                                  v ****      +
 *       +             zfs_delay_scale ---------->        *****         +
 *       |                                             ****             |
 *       +                                          ****                +
 * 100us +                                        **                    +
 *       +                                       *                      +
 *       |                                      *                       |
 *       +                                     *                        +
 *  10us +                                     *                        +
 *       +                                                              +
 *       |                                                              |
 *       +                                                              +
 *       +--------------------------------------------------------------+
 *       0%                    <- zfs_dirty_data_max ->               100%
 *
 * Note here that only as the amount of dirty data approaches its limit does
 * the delay start to increase rapidly.  The goal of a properly tuned system
 * should be to keep the amount of dirty data out of that range by first
 * ensuring that the appropriate limits are set for the I/O scheduler to reach
 * optimal throughput on the backend storage, and then by changing the value
 * of zfs_delay_scale to increase the steepness of the curve.
 */
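
/*
 * A worked example of the min_time formula above, using illustrative
 * tunable values (zfs_delay_scale = 500000, zfs_dirty_data_max = 4GB,
 * zfs_delay_min_dirty_percent = 60, so delay_min_bytes = 2.4GB).
 * With 3GB of dirty data:
 *
 *	min_time = 500000 * (3GB - 2.4GB) / (4GB - 3GB)
 *	         = 500000 * 0.6
 *	         = 300000ns = 300us
 *
 * i.e. at a steady 3GB of dirty data, roughly one write transaction is
 * admitted every 300us (about 3300 IOPS).
 */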
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
	dsl_pool_t *dp = tx->tx_pool;
	uint64_t delay_min_bytes =
	    zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
	hrtime_t wakeup, min_tx_time, now;

	if (dirty <= delay_min_bytes)
		return;

	/*
	 * The caller has already waited until we are under the max.
	 * We make them pass us the amount of dirty data so we don't
	 * have to handle the case of it being >= the max, which could
	 * cause a divide-by-zero if it's == the max.
	 */
	ASSERT3U(dirty, <, zfs_dirty_data_max);

	now = gethrtime();
	min_tx_time = zfs_delay_scale *
	    (dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
	if (now > tx->tx_start + min_tx_time)
		return;

	min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);

	DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
	    uint64_t, min_tx_time);

	mutex_enter(&dp->dp_lock);
	wakeup = MAX(tx->tx_start + min_tx_time,
	    dp->dp_last_wakeup + min_tx_time);
	dp->dp_last_wakeup = wakeup;
	mutex_exit(&dp->dp_lock);

#ifdef _KERNEL
#ifdef illumos
	mutex_enter(&curthread->t_delay_lock);
	while (cv_timedwait_hires(&curthread->t_delay_cv,
	    &curthread->t_delay_lock, wakeup, zfs_delay_resolution_ns,
	    CALLOUT_FLAG_ABSOLUTE | CALLOUT_FLAG_ROUNDUP) > 0)
		continue;
	mutex_exit(&curthread->t_delay_lock);
#else
	pause_sbt("dmu_tx_delay", wakeup * SBT_1NS,
	    zfs_delay_resolution_ns * SBT_1NS, C_ABSOLUTE);
#endif
#else
	hrtime_t delta = wakeup - gethrtime();
	struct timespec ts;
	ts.tv_sec = delta / NANOSEC;
	ts.tv_nsec = delta % NANOSEC;
	(void) nanosleep(&ts, NULL);
#endif
}

static int
dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (SET_ERROR(EIO));

		return (SET_ERROR(ERESTART));
	}

	if (!tx->tx_waited &&
	    dsl_pool_need_dirty_delay(tx->tx_pool)) {
		tx->tx_wait_dirty = B_TRUE;
		return (SET_ERROR(ERESTART));
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (SET_ERROR(ERESTART));
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	/*
	 * Walk the transaction's hold list, removing the hold on the
	 * associated dnode, and notifying waiters if the refcount drops to 0.
	 */
	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	TXG_WAITED.  Like TXG_NOWAIT, but indicates that dmu_tx_wait()
 *	has already been called on behalf of this operation (though
 *	most likely on a different tx).
 */
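
/*
 * A sketch of the TXG_NOWAIT retry loop described above (locking and the
 * actual modification elided; "top:" re-acquires any locks dropped for
 * the wait).  Note that dmu_tx_wait() must be called before dmu_tx_abort()
 * frees the tx:
 *
 *	top:
 *		tx = dmu_tx_create(os);
 *		dmu_tx_hold_write(tx, object, off, len);
 *		err = dmu_tx_assign(tx, TXG_NOWAIT);
 *		if (err != 0) {
 *			if (err == ERESTART) {
 *				dmu_tx_wait(tx);
 *				dmu_tx_abort(tx);
 *				goto top;
 *			}
 *			dmu_tx_abort(tx);
 *			return (err);
 *		}
 *		... modify the data covered by the holds ...
 *		dmu_tx_commit(tx);
 */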
int
dmu_tx_assign(dmu_tx_t *tx, txg_how_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how == TXG_WAIT || txg_how == TXG_NOWAIT ||
	    txg_how == TXG_WAITED);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	/* If we might wait, we must not hold the config lock. */
	ASSERT(txg_how != TXG_WAIT || !dsl_pool_config_held(tx->tx_pool));

	if (txg_how == TXG_WAITED)
		tx->tx_waited = B_TRUE;

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dsl_pool_t *dp = tx->tx_pool;

	ASSERT(tx->tx_txg == 0);
	ASSERT(!dsl_pool_config_held(tx->tx_pool));

	if (tx->tx_wait_dirty) {
		/*
		 * dmu_tx_try_assign() has determined that we need to wait
		 * because we've consumed much or all of the dirty buffer
		 * space.
		 */
		mutex_enter(&dp->dp_lock);
		while (dp->dp_dirty_total >= zfs_dirty_data_max)
			cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
		uint64_t dirty = dp->dp_dirty_total;
		mutex_exit(&dp->dp_lock);

		dmu_tx_delay(tx, dirty);

		tx->tx_wait_dirty = B_FALSE;

		/*
		 * Note: setting tx_waited only has effect if the caller
		 * used TXG_WAIT.  Otherwise they are going to destroy
		 * this tx and try again.  The common case, zfs_write(),
		 * uses TXG_WAIT.
		 */
		tx->tx_waited = B_TRUE;
	} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		/*
		 * If the pool is suspended we need to wait until it
		 * is resumed.  Note that it's possible that the pool
		 * has become active after this thread has tried to
		 * obtain a tx.  If that's the case then tx_lasttried_txg
		 * would not have been set.
		 */
		txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		/*
		 * A dnode is assigned to the quiescing txg.  Wait for its
		 * transaction to complete.
		 */
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	/*
	 * Go through the transaction's hold list and remove holds on
	 * associated dnodes, notifying waiters if no holds remain.
	 */
	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
	ASSERT(tx->tx_pool != NULL);
	return (tx->tx_pool);
}


void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}
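
/*
 * A minimal sketch of the commit callback interface, assuming a
 * hypothetical caller-defined function matching dmu_tx_callback_func_t.
 * The callback fires when the txg commits (error == 0) or when the tx
 * is aborted / never committed (a nonzero error such as ECANCELED):
 *
 *	static void
 *	my_commit_cb(void *arg, int error)	(hypothetical callback)
 *	{
 *		... release or log "arg" based on "error" ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, arg);
 */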
/*
 * Interface to hold a bunch of attributes; used for creating new files.
 * attrsize is the total size of all attributes
 * to be added during object creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow should be set when the update may add new attributes or grow
 * the size of existing variable-sized attributes.  In that case the SA
 * layout may change, so the layout ZAP and a possible spill block are
 * held as well.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}
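
/*
 * A minimal sketch of a typical dmu_tx_hold_sa() call, assuming an object
 * with an SA handle hdl whose existing attributes are being updated in
 * place (so the attribute bundle cannot grow):
 *
 *	dmu_tx_hold_sa(tx, hdl, B_FALSE);
 *
 * Pass may_grow == B_TRUE when attributes may be added or enlarged, so
 * that the layout ZAP and a possible spill block are held as well.
 */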