/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_softdep.c 284200 2015-06-10 02:12:01Z kib $");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

/*
 * For now we want the safety net that the DEBUG flag provides.
 */
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0 /* Define to KTR_SPARE. */

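/*
 * When the kernel is built without the SOFTUPDATES option, the soft
 * updates entry points below are provided as stubs: operations that
 * should never be reached without soft updates panic, while the few
 * that may legitimately be called (e.g. softdep_mount() and
 * softdep_fsync()) simply return benign defaults.
 */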
#ifndef SOFTUPDATES

int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

	panic("softdep_unmount called");
}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{

	panic("softdep_setup_sbupdate called");
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

panic("softdep_change_directoryentry_offset called"); 294} 295 296void 297softdep_setup_remove(bp, dp, ip, isrmdir) 298 struct buf *bp; 299 struct inode *dp; 300 struct inode *ip; 301 int isrmdir; 302{ 303 304 panic("softdep_setup_remove called"); 305} 306 307void 308softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 309 struct buf *bp; 310 struct inode *dp; 311 struct inode *ip; 312 ino_t newinum; 313 int isrmdir; 314{ 315 316 panic("softdep_setup_directory_change called"); 317} 318 319void 320softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 321 struct mount *mp; 322 struct buf *bp; 323 ufs2_daddr_t blkno; 324 int frags; 325 struct workhead *wkhd; 326{ 327 328 panic("%s called", __FUNCTION__); 329} 330 331void 332softdep_setup_inofree(mp, bp, ino, wkhd) 333 struct mount *mp; 334 struct buf *bp; 335 ino_t ino; 336 struct workhead *wkhd; 337{ 338 339 panic("%s called", __FUNCTION__); 340} 341 342void 343softdep_setup_unlink(dp, ip) 344 struct inode *dp; 345 struct inode *ip; 346{ 347 348 panic("%s called", __FUNCTION__); 349} 350 351void 352softdep_setup_link(dp, ip) 353 struct inode *dp; 354 struct inode *ip; 355{ 356 357 panic("%s called", __FUNCTION__); 358} 359 360void 361softdep_revert_link(dp, ip) 362 struct inode *dp; 363 struct inode *ip; 364{ 365 366 panic("%s called", __FUNCTION__); 367} 368 369void 370softdep_setup_rmdir(dp, ip) 371 struct inode *dp; 372 struct inode *ip; 373{ 374 375 panic("%s called", __FUNCTION__); 376} 377 378void 379softdep_revert_rmdir(dp, ip) 380 struct inode *dp; 381 struct inode *ip; 382{ 383 384 panic("%s called", __FUNCTION__); 385} 386 387void 388softdep_setup_create(dp, ip) 389 struct inode *dp; 390 struct inode *ip; 391{ 392 393 panic("%s called", __FUNCTION__); 394} 395 396void 397softdep_revert_create(dp, ip) 398 struct inode *dp; 399 struct inode *ip; 400{ 401 402 panic("%s called", __FUNCTION__); 403} 404 405void 406softdep_setup_mkdir(dp, ip) 407 struct inode *dp; 408 struct inode *ip; 409{ 410 411 panic("%s called", __FUNCTION__); 412} 413 414void 415softdep_revert_mkdir(dp, ip) 416 struct inode *dp; 417 struct inode *ip; 418{ 419 420 panic("%s called", __FUNCTION__); 421} 422 423void 424softdep_setup_dotdot_link(dp, ip) 425 struct inode *dp; 426 struct inode *ip; 427{ 428 429 panic("%s called", __FUNCTION__); 430} 431 432int 433softdep_prealloc(vp, waitok) 434 struct vnode *vp; 435 int waitok; 436{ 437 438 panic("%s called", __FUNCTION__); 439} 440 441int 442softdep_journal_lookup(mp, vpp) 443 struct mount *mp; 444 struct vnode **vpp; 445{ 446 447 return (ENOENT); 448} 449 450void 451softdep_change_linkcnt(ip) 452 struct inode *ip; 453{ 454 455 panic("softdep_change_linkcnt called"); 456} 457 458void 459softdep_load_inodeblock(ip) 460 struct inode *ip; 461{ 462 463 panic("softdep_load_inodeblock called"); 464} 465 466void 467softdep_update_inodeblock(ip, bp, waitfor) 468 struct inode *ip; 469 struct buf *bp; 470 int waitfor; 471{ 472 473 panic("softdep_update_inodeblock called"); 474} 475 476int 477softdep_fsync(vp) 478 struct vnode *vp; /* the "in_core" copy of the inode */ 479{ 480 481 return (0); 482} 483 484void 485softdep_fsync_mountdev(vp) 486 struct vnode *vp; 487{ 488 489 return; 490} 491 492int 493softdep_flushworklist(oldmnt, countp, td) 494 struct mount *oldmnt; 495 int *countp; 496 struct thread *td; 497{ 498 499 *countp = 0; 500 return (0); 501} 502 503int 504softdep_sync_metadata(struct vnode *vp) 505{ 506 507 panic("softdep_sync_metadata called"); 508} 509 510int 511softdep_sync_buf(struct vnode *vp, struct buf *bp, 
    int waitfor)
{

	panic("softdep_sync_buf called");
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_depcnt,
		      int softdep_accdepcnt,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_depcnt;
	(void) softdep_accdepcnt;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_append called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_append called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");

static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
    "high use dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
    "current dependencies written");

unsigned long dep_current[D_LAST + 1];
unsigned long dep_highuse[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];

#define	SOFTDEP_TYPE(type, str, long)					\
    static MALLOC_DEFINE(M_ ## type, #str, long);			\
    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_total[D_ ## type], 0, "");					\
    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_current[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_highuse[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_write[D_ ## type], 0, "");

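/*
 * As an illustration, the invocation SOFTDEP_TYPE(PAGEDEP, pagedep,
 * "File page dependencies") below expands to a MALLOC_DEFINE() of
 * M_PAGEDEP plus four read-only sysctls exporting dep_total[D_PAGEDEP],
 * dep_current[D_PAGEDEP], dep_highuse[D_PAGEDEP] and dep_write[D_PAGEDEP]
 * as debug.softdep.{total,current,highuse,write}.pagedep.
 */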
dependency"); 651SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode"); 652SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies"); 653SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block"); 654SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode"); 655SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode"); 656SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated"); 657SOFTDEP_TYPE(DIRADD, diradd, "New directory entry"); 658SOFTDEP_TYPE(MKDIR, mkdir, "New directory"); 659SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted"); 660SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block"); 661SOFTDEP_TYPE(FREEWORK, freework, "free an inode block"); 662SOFTDEP_TYPE(FREEDEP, freedep, "track a block free"); 663SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add"); 664SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove"); 665SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move"); 666SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block"); 667SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block"); 668SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag"); 669SOFTDEP_TYPE(JSEG, jseg, "Journal segment"); 670SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete"); 671SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency"); 672SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation"); 673SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete"); 674 675static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel"); 676 677static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes"); 678static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations"); 679static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data"); 680 681#define M_SOFTDEP_FLAGS (M_WAITOK) 682 683/* 684 * translate from workitem type to memory type 685 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 686 */ 687static struct malloc_type *memtype[] = { 688 M_PAGEDEP, 689 M_INODEDEP, 690 M_BMSAFEMAP, 691 M_NEWBLK, 692 M_ALLOCDIRECT, 693 M_INDIRDEP, 694 M_ALLOCINDIR, 695 M_FREEFRAG, 696 M_FREEBLKS, 697 M_FREEFILE, 698 M_DIRADD, 699 M_MKDIR, 700 M_DIRREM, 701 M_NEWDIRBLK, 702 M_FREEWORK, 703 M_FREEDEP, 704 M_JADDREF, 705 M_JREMREF, 706 M_JMVREF, 707 M_JNEWBLK, 708 M_JFREEBLK, 709 M_JFREEFRAG, 710 M_JSEG, 711 M_JSEGDEP, 712 M_SBDEP, 713 M_JTRUNC, 714 M_JFSYNC, 715 M_SENTINEL 716}; 717 718#define DtoM(type) (memtype[type]) 719 720/* 721 * Names of malloc types. 722 */ 723#define TYPENAME(type) \ 724 ((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???") 725/* 726 * End system adaptation definitions. 727 */ 728 729#define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino) 730#define DOT_OFFSET offsetof(struct dirtemplate, dot_ino) 731 732/* 733 * Internal function prototypes. 
/*
 * End system adaptation definitions.
 */

#define DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
#define DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)

/*
 * Internal function prototypes.
 */
static void check_clear_deps(struct mount *);
static void softdep_error(char *, int);
static int softdep_process_worklist(struct mount *, int);
static int softdep_waitidle(struct mount *, int);
static void drain_output(struct vnode *);
static struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
static int check_inodedep_free(struct inodedep *);
static void clear_remove(struct mount *);
static void clear_inodedeps(struct mount *);
static void unlinked_inodedep(struct mount *, struct inodedep *);
static void clear_unlinked_inodedep(struct inodedep *);
static struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static int flush_pagedep_deps(struct vnode *, struct mount *,
	struct diraddhd *);
static int free_pagedep(struct pagedep *);
static int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static int flush_deplist(struct allocdirectlst *, int, int *);
static int sync_cgs(struct mount *, int);
static int handle_written_filepage(struct pagedep *, struct buf *);
static int handle_written_sbdep(struct sbdep *, struct buf *);
static void initiate_write_sbdep(struct sbdep *);
static void diradd_inode_written(struct diradd *, struct inodedep *);
static int handle_written_indirdep(struct indirdep *, struct buf *,
	struct buf**);
static int handle_written_inodeblock(struct inodedep *, struct buf *);
static int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
	uint8_t *);
static int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
static void handle_written_jaddref(struct jaddref *);
static void handle_written_jremref(struct jremref *);
static void handle_written_jseg(struct jseg *, struct buf *);
static void handle_written_jnewblk(struct jnewblk *);
static void handle_written_jblkdep(struct jblkdep *);
static void handle_written_jfreefrag(struct jfreefrag *);
static void complete_jseg(struct jseg *);
static void complete_jsegs(struct jseg *);
static void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static inline void inoref_write(struct inoref *, struct jseg *,
	struct jrefrec *);
static void handle_allocdirect_partdone(struct allocdirect *,
	struct workhead *);
static struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
	struct workhead *);
static void indirdep_complete(struct indirdep *);
static int indirblk_lookup(struct mount *, ufs2_daddr_t);
static void indirblk_insert(struct freework *);
static void indirblk_remove(struct freework *);
static void handle_allocindir_partdone(struct allocindir *);
static void initiate_write_filepage(struct pagedep *, struct buf *);
static void initiate_write_indirdep(struct indirdep*, struct buf *);
static void handle_written_mkdir(struct mkdir *, int);
static int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
	uint8_t *);
static void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static void handle_workitem_freefile(struct freefile *);
static int handle_workitem_remove(struct dirrem *, int);
static struct dirrem *newdirrem(struct buf *, struct inode *,
	struct inode *, int, struct dirrem **);
static struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	struct buf *);
static void cancel_indirdep(struct indirdep *, struct buf *,
	struct freeblks *);
static void free_indirdep(struct indirdep *);
static void free_diradd(struct diradd *, struct workhead *);
static void merge_diradd(struct inodedep *, struct diradd *);
static void complete_diradd(struct diradd *);
static struct diradd *diradd_lookup(struct pagedep *, int);
static struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	struct jremref *);
static struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	struct jremref *);
static void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	struct jremref *, struct jremref *);
static void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
	struct jremref *);
static void cancel_allocindir(struct allocindir *, struct buf *bp,
	struct freeblks *, int);
static int setup_trunc_indir(struct freeblks *, struct inode *,
	ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static void complete_trunc_indir(struct freework *);
static void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
	int);
static void complete_mkdir(struct mkdir *);
static void free_newdirblk(struct newdirblk *);
static void free_jremref(struct jremref *);
static void free_jaddref(struct jaddref *);
static void free_jsegdep(struct jsegdep *);
static void free_jsegs(struct jblocks *);
static void rele_jseg(struct jseg *);
static void free_jseg(struct jseg *, struct jblocks *);
static void free_jnewblk(struct jnewblk *);
static void free_jblkdep(struct jblkdep *);
static void free_jfreefrag(struct jfreefrag *);
static void free_freedep(struct freedep *);
static void journal_jremref(struct dirrem *, struct jremref *,
	struct inodedep *);
static void cancel_jnewblk(struct jnewblk *, struct workhead *);
static int cancel_jaddref(struct jaddref *, struct inodedep *,
	struct workhead *);
static void cancel_jfreefrag(struct jfreefrag *);
static inline void setup_freedirect(struct freeblks *, struct inode *,
	int, int);
static inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static inline void setup_freeindir(struct freeblks *, struct inode *, int,
	ufs_lbn_t, int);
static inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static void freeblks_free(struct ufsmount *, struct freeblks *, int);
static void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
static ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	int, int);
static void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static int deallocate_dependencies(struct buf *, struct freeblks *, int);
static void newblk_freefrag(struct newblk*);
static void free_newblk(struct newblk *);
static void cancel_allocdirect(struct allocdirectlst *,
	struct allocdirect *, struct freeblks *);
static int check_inode_unwritten(struct inodedep *);
static int free_inodedep(struct inodedep *);
static void freework_freeblock(struct freework *);
static void freework_enqueue(struct freework *);
static int handle_workitem_freeblocks(struct freeblks *, int);
static int handle_complete_freeblocks(struct freeblks *, int);
static void handle_workitem_indirblk(struct freework *);
static void handle_written_freework(struct freework *);
static void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	struct workhead *);
static struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	struct inodedep *, struct allocindir *, ufs_lbn_t);
static struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	ufs2_daddr_t, ufs_lbn_t);
static void handle_workitem_freefrag(struct freefrag *);
static struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	ufs_lbn_t);
static void allocdirect_merge(struct allocdirectlst *,
	struct allocdirect *, struct allocdirect *);
static struct freefrag *allocindir_merge(struct allocindir *,
	struct allocindir *);
static int bmsafemap_find(struct bmsafemap_hashhead *, int,
	struct bmsafemap **);
static struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	int cg, struct bmsafemap *);
static int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
	struct newblk **);
static int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static int inodedep_find(struct inodedep_hashhead *, ino_t,
	struct inodedep **);
static int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	int, struct pagedep **);
static int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	struct pagedep **);
static void pause_timer(void *);
static int request_cleanup(struct mount *, int);
static void schedule_cleanup(struct mount *);
static void softdep_ast_cleanup_proc(void);
static int process_worklist_item(struct mount *, int, int);
static void process_removes(struct vnode *);
static void process_truncates(struct vnode *);
static void jwork_move(struct workhead *, struct workhead *);
static void jwork_insert(struct workhead *, struct jsegdep *);
static void add_to_worklist(struct worklist *, int);
static void wake_worklist(struct worklist *);
static void wait_worklist(struct worklist *, char *);
static void remove_from_worklist(struct worklist *);
static void softdep_flush(void *);
static void softdep_flushjournal(struct mount *);
static int softdep_speedup(struct ufsmount *);
static void worklist_speedup(struct mount *);
static int journal_mount(struct mount *, struct fs *, struct ucred *);
static void journal_unmount(struct ufsmount *);
static int journal_space(struct ufsmount *, int);
static void journal_suspend(struct ufsmount *);
static int journal_unsuspend(struct ufsmount *ump);
static void softdep_prelink(struct vnode *, struct vnode *);
static void add_to_journal(struct worklist *);
static void remove_from_journal(struct worklist *);
static bool softdep_excess_inodes(struct ufsmount *);
static bool softdep_excess_dirrem(struct ufsmount *);
static void softdep_process_journal(struct mount *, struct worklist *, int);
static struct jremref *newjremref(struct dirrem *, struct inode *,
	struct inode *ip, off_t, nlink_t);
static struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	uint16_t);
static inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	uint16_t);
static inline struct jsegdep *inoref_jseg(struct inoref *);
static struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	ufs2_daddr_t, int);
static void adjust_newfreework(struct freeblks *, int);
static struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static void move_newblock_dep(struct jaddref *, struct inodedep *);
static void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	ufs2_daddr_t, long, ufs_lbn_t);
static struct freework *newfreework(struct ufsmount *, struct freeblks *,
	struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static int jwait(struct worklist *, int);
static struct inodedep *inodedep_lookup_ip(struct inode *);
static int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static void handle_jwork(struct workhead *);
static struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	struct mkdir **);
static struct jblocks *jblocks_create(void);
static ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static void jblocks_free(struct jblocks *, struct mount *, int);
static void jblocks_destroy(struct jblocks *);
static void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static void softdep_disk_io_initiation(struct buf *);
static void softdep_disk_write_complete(struct buf *);
static void softdep_deallocate_dependencies(struct buf *);
static int softdep_count_dependencies(struct buf *bp, int);

/*
 * Global lock over all of soft updates.
 */
static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF);

#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)

/*
 * Per-filesystem soft-updates locking.
 */
#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
				    RA_WLOCKED)

#define BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
#define BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)
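/*
 * The common pattern in the routines below is to look up the per-mount
 * softdep data and manipulate it with the per-filesystem lock held:
 *
 *	ump = VFSTOUFS(mp);
 *	ACQUIRE_LOCK(ump);
 *	... update the mount's dependency state ...
 *	FREE_LOCK(ump);
 */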
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE

#else /* DEBUG */
static void worklist_insert(struct workhead *, struct worklist *, int);
static void worklist_remove(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)

static void
worklist_insert(head, item, locked)
	struct workhead *head;
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item, locked)
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}
#endif /* DEBUG */

/*
 * Merge two jsegdeps keeping only the oldest one as newer references
 * can't be discarded until after older references.
 */
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
{
	struct jsegdep *swp;

	if (two == NULL)
		return (one);

	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
		swp = one;
		one = two;
		two = swp;
	}
	WORKLIST_REMOVE(&two->jd_list);
	free_jsegdep(two);

	return (one);
}

/*
 * If two freedeps are compatible free one to reduce list size.
 */
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
{
	if (two == NULL)
		return (one);

	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
		free_freedep(two);
	}
	return (one);
}
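/*
 * The two merge helpers above are used by jwork_move() below to coalesce
 * duplicate entries while journal work is moved between lists.
 */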
/*
 * Move journal work from one list to another. Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
 */
static void
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
{
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;

	KASSERT(dst != src,
	    ("jwork_move: dst == src"));
	freedep = NULL;
	jsegdep = NULL;
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}

	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
			continue;
		}
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}
}

static void
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
{
	struct jsegdep *jsegdepn;
	struct worklist *wk;

	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
			break;
	if (wk == NULL) {
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		return;
	}
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	} else
		free_jsegdep(jsegdep);
}

/*
 * Routines for tracking and managing workitems.
 */
static void workitem_free(struct worklist *, int);
static void workitem_alloc(struct worklist *, int, struct mount *);
static void workitem_reassign(struct worklist *, int);

#define WORKITEM_FREE(item, type) \
	workitem_free((struct worklist *)(item), (type))
#define WORKITEM_REASSIGN(item, type) \
	workitem_reassign((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;

#ifdef DEBUG
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list",
		    TYPENAME(item->wk_type), item->wk_state);
	if (item->wk_type != type && type != D_NEWBLK)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
#endif
	if (item->wk_state & IOWAITING)
		wakeup(item);
	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_deps > 0,
	    ("workitem_free: %s: softdep_deps going negative",
	    ump->um_fs->fs_fsmnt));
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_free: %s: dep_current[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	atomic_subtract_long(&dep_current[item->wk_type], 1);
	ump->softdep_curdeps[item->wk_type] -= 1;
	free(item, DtoM(type));
}
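/*
 * Note the locking split in workitem_alloc() and workitem_reassign()
 * below: the global counters (dep_current, dep_highuse, dep_total) are
 * updated under the global lock "lk", while the per-mount counters in
 * the ufsmount are updated under the per-filesystem lock.
 */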
static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	struct ufsmount *ump;

	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;

	ump = VFSTOUFS(mp);
	ACQUIRE_GBLLOCK(&lk);
	dep_current[type]++;
	if (dep_current[type] > dep_highuse[type])
		dep_highuse[type] = dep_current[type];
	dep_total[type]++;
	FREE_GBLLOCK(&lk);
	ACQUIRE_LOCK(ump);
	ump->softdep_curdeps[type] += 1;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
	FREE_LOCK(ump);
}

static void
workitem_reassign(item, newtype)
	struct worklist *item;
	int newtype;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ump->softdep_curdeps[item->wk_type] -= 1;
	ump->softdep_curdeps[newtype] += 1;
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_reassign: %s: dep_current[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ACQUIRE_GBLLOCK(&lk);
	dep_current[newtype]++;
	dep_current[item->wk_type]--;
	if (dep_current[newtype] > dep_highuse[newtype])
		dep_highuse[newtype] = dep_current[newtype];
	dep_total[newtype]++;
	FREE_GBLLOCK(&lk);
	item->wk_type = newtype;
}

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_flush_threads;	/* number of softdep flushing threads */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */
static int stat_emptyjblocks; /* Number of potentially empty journal blocks */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
    &stat_flush_threads, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
    &stat_emptyjblocks, 0, "");

SYSCTL_DECL(_vfs_ffs);
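/*
 * The knobs and counters above are exported under the debug.softdep
 * sysctl tree; for example, "sysctl debug.softdep.max_softdeps" reports
 * the structure limit and "sysctl debug.softdep.flush_threads" the
 * current number of flushing threads.
 */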
/* Whether to recompute the summary at mount time */
static int compute_summary_at_mount = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");
static int print_threads = 0;
SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
    &print_threads, 0, "Notify flusher thread start/stop");

/* List of all filesystems mounted with soft updates */
static TAILQ_HEAD(, mount_softdeps) softdepmounts;

/*
 * This function cleans the worklist for a filesystem.
 * Each filesystem running with soft dependencies gets its own
 * thread to run in this function. The thread is started up in
 * softdep_mount and shut down in softdep_unmount. They show up
 * as part of the kernel "bufdaemon" process whose process
 * entry is available in bufdaemonproc.
 */
static int searchfailed;
extern struct proc *bufdaemonproc;
static void
softdep_flush(addr)
	void *addr;
{
	struct mount *mp;
	struct thread *td;
	struct ufsmount *ump;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;
	mp = (struct mount *)addr;
	ump = VFSTOUFS(mp);
	atomic_add_int(&stat_flush_threads, 1);
	ACQUIRE_LOCK(ump);
	ump->softdep_flags &= ~FLUSH_STARTING;
	wakeup(&ump->softdep_flushtd);
	FREE_LOCK(ump);
	if (print_threads) {
		if (stat_flush_threads == 1)
			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
			    bufdaemonproc->p_pid);
		printf("Start thread %s\n", td->td_name);
	}
	for (;;) {
		while (softdep_process_worklist(mp, 0) > 0 ||
		    (MOUNTEDSUJ(mp) &&
		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
			kthread_suspend_check();
		ACQUIRE_LOCK(ump);
		if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
			    "sdflush", hz / 2);
		ump->softdep_flags &= ~FLUSH_CLEANUP;
		/*
		 * Check to see if we are done and need to exit.
		 */
		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
			FREE_LOCK(ump);
			continue;
		}
		ump->softdep_flags &= ~FLUSH_EXIT;
		FREE_LOCK(ump);
		wakeup(&ump->softdep_flags);
		if (print_threads)
			printf("Stop thread %s: searchfailed %d, "
			    "did cleanups %d\n", td->td_name, searchfailed,
			    ump->um_softdep->sd_cleanups);
		atomic_subtract_int(&stat_flush_threads, 1);
		kthread_exit();
		panic("kthread_exit failed\n");
	}
}
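/*
 * The flusher thread above sleeps until worklist_speedup() or
 * softdep_speedup() sets FLUSH_CLEANUP to request a cleanup pass, and
 * exits once FLUSH_EXIT is set when the filesystem is unmounted.
 */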
static void
worklist_speedup(mp)
	struct mount *mp;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
		ump->softdep_flags |= FLUSH_CLEANUP;
	wakeup(&ump->softdep_flushtd);
}

static int
softdep_speedup(ump)
	struct ufsmount *ump;
{
	struct ufsmount *altump;
	struct mount_softdeps *sdp;

	LOCK_OWNED(ump);
	worklist_speedup(ump->um_mountp);
	bd_speedup();
	/*
	 * If we have global shortages, then we need other
	 * filesystems to help with the cleanup. Here we wakeup a
	 * flusher thread for a filesystem that is over its fair
	 * share of resources.
	 */
	if (req_clear_inodedeps || req_clear_remove) {
		ACQUIRE_GBLLOCK(&lk);
		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
			if ((altump = sdp->sd_ump) == ump)
				continue;
			if (((req_clear_inodedeps &&
			    altump->softdep_curdeps[D_INODEDEP] >
			    max_softdeps / stat_flush_threads) ||
			    (req_clear_remove &&
			    altump->softdep_curdeps[D_DIRREM] >
			    (max_softdeps / 2) / stat_flush_threads)) &&
			    TRY_ACQUIRE_LOCK(altump))
				break;
		}
		if (sdp == NULL) {
			searchfailed++;
			FREE_GBLLOCK(&lk);
		} else {
			/*
			 * Move to the end of the list so we pick a
			 * different one on our next try.
			 */
			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
			FREE_GBLLOCK(&lk);
			if ((altump->softdep_flags &
			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
				altump->softdep_flags |= FLUSH_CLEANUP;
			altump->um_softdep->sd_cleanups++;
			wakeup(&altump->softdep_flushtd);
			FREE_LOCK(altump);
		}
	}
	return (speedup_syncer());
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */

#define WK_HEAD		0x0001	/* Add to HEAD. */
#define WK_NODELAY	0x0002	/* Process immediately. */

static void
add_to_worklist(wk, flags)
	struct worklist *wk;
	int flags;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	} else {
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	}
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
		worklist_speedup(wk->wk_mp);
}

/*
 * Remove the item to be processed. If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
 */
static void
remove_from_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	WORKLIST_REMOVE(wk);
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	ump->softdep_on_worklist -= 1;
}

static void
wake_worklist(wk)
	struct worklist *wk;
{
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
		wakeup(wk);
	}
}

static void
wait_worklist(wk, wmesg)
	struct worklist *wk;
	char *wmesg;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	wk->wk_state |= IOWAITING;
	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
}
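/*
 * wait_worklist() and wake_worklist() implement a simple handshake on
 * the IOWAITING flag: a thread that must wait for a workitem marks it
 * IOWAITING and sleeps on its address; the thread that completes the
 * item issues the matching wakeup, either via wake_worklist() or from
 * workitem_free() if the item is being destroyed.
 */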
/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which items
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	int cnt, matchcnt;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	if (MOUNTEDSOFTDEP(mp) == 0)
		return (0);
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	check_clear_deps(mp);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
			break;
		else
			matchcnt += cnt;
		check_clear_deps(mp);
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (should_yield()) {
			FREE_LOCK(ump);
			kern_yield(PRI_USER);
			bwillwrite();
			ACQUIRE_LOCK(ump);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. This gives the syncer thread the opportunity
		 * to pause if appropriate.
		 */
		if (!full && starttime != time_second)
			break;
	}
	if (full == 0)
		journal_unsuspend(ump);
	FREE_LOCK(ump);
	return (matchcnt);
}

/*
 * Process all removes associated with a vnode if we are running out of
 * journal space. Any other process that attempts to flush these will
 * be unable to do so, as we have the vnodes locked.
 */
static void
process_removes(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
top:
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			/*
			 * If another thread is trying to lock this vnode
			 * it will fail but we must wait for it to do so
			 * before we can proceed.
			 */
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
				goto top;
			}
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
				break;
		}
		if (dirrem == NULL)
			return;
		remove_from_worklist(&dirrem->dm_list);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
	}
}
/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space. This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(ump);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(ump);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(ump);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(ump);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(ump);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(ump);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}
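/*
 * The scan in process_worklist_item() below keeps a sentinel item on
 * the pending list so that the per-filesystem lock can be dropped while
 * an item is processed: each selected item is removed from the list,
 * the lock is released for the actual work, and the scan then resumes
 * at the sentinel's current successor.
 */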
/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	PHOLD(curproc);	/* Don't let the stack go away. */
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later. Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	LIST_REMOVE(&sentinel, wk_list);
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
	PRELE(curproc);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
int
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;
	struct ufsmount *ump;
	int dirty;

	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
		return (0);
	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
	    ("softdep_move_dependencies called on non-softdep filesystem"));
	dirty = 0;
	wktail = NULL;
	ump = VFSTOUFS(wk->wk_mp);
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			dirty = 1;
		if (wktail == NULL)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(ump);

	return (dirty);
}
1885 */ 1886int 1887softdep_flushworklist(oldmnt, countp, td) 1888 struct mount *oldmnt; 1889 int *countp; 1890 struct thread *td; 1891{ 1892 struct vnode *devvp; 1893 struct ufsmount *ump; 1894 int count, error; 1895 1896 /* 1897 * Alternately flush the block device associated with the mount 1898 * point and process any dependencies that the flushing 1899 * creates. We continue until no more worklist dependencies 1900 * are found. 1901 */ 1902 *countp = 0; 1903 error = 0; 1904 ump = VFSTOUFS(oldmnt); 1905 devvp = ump->um_devvp; 1906 while ((count = softdep_process_worklist(oldmnt, 1)) > 0) { 1907 *countp += count; 1908 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1909 error = VOP_FSYNC(devvp, MNT_WAIT, td); 1910 VOP_UNLOCK(devvp, 0); 1911 if (error != 0) 1912 break; 1913 } 1914 return (error); 1915} 1916 1917#define SU_WAITIDLE_RETRIES 20 1918static int 1919softdep_waitidle(struct mount *mp, int flags __unused) 1920{ 1921 struct ufsmount *ump; 1922 struct vnode *devvp; 1923 struct thread *td; 1924 int error, i; 1925 1926 ump = VFSTOUFS(mp); 1927 devvp = ump->um_devvp; 1928 td = curthread; 1929 error = 0; 1930 ACQUIRE_LOCK(ump); 1931 for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) { 1932 ump->softdep_req = 1; 1933 KASSERT((flags & FORCECLOSE) == 0 || 1934 ump->softdep_on_worklist == 0, 1935 ("softdep_waitidle: work added after flush")); 1936 msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP, 1937 "softdeps", 10 * hz); 1938 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 1939 error = VOP_FSYNC(devvp, MNT_WAIT, td); 1940 VOP_UNLOCK(devvp, 0); 1941 if (error != 0) 1942 break; 1943 ACQUIRE_LOCK(ump); 1944 } 1945 ump->softdep_req = 0; 1946 if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) { 1947 error = EBUSY; 1948 printf("softdep_waitidle: Failed to flush worklist for %p\n", 1949 mp); 1950 } 1951 FREE_LOCK(ump); 1952 return (error); 1953} 1954 1955/* 1956 * Flush all vnodes and worklist items associated with a specified mount point. 1957 */ 1958int 1959softdep_flushfiles(oldmnt, flags, td) 1960 struct mount *oldmnt; 1961 int flags; 1962 struct thread *td; 1963{ 1964#ifdef QUOTA 1965 struct ufsmount *ump; 1966 int i; 1967#endif 1968 int error, early, depcount, loopcnt, retry_flush_count, retry; 1969 int morework; 1970 1971 KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0, 1972 ("softdep_flushfiles called on non-softdep filesystem")); 1973 loopcnt = 10; 1974 retry_flush_count = 3; 1975retry_flush: 1976 error = 0; 1977 1978 /* 1979 * Alternately flush the vnodes associated with the mount 1980 * point and process any dependencies that the flushing 1981 * creates. In theory, this loop can happen at most twice, 1982 * but we give it a few extra just to be sure. 1983 */ 1984 for (; loopcnt > 0; loopcnt--) { 1985 /* 1986 * Do another flush in case any vnodes were brought in 1987 * as part of the cleanup operations. 1988 */ 1989 early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag & 1990 MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH; 1991 if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0) 1992 break; 1993 if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 || 1994 depcount == 0) 1995 break; 1996 } 1997 /* 1998 * If we are unmounting then it is an error to fail. If we 1999 * are simply trying to downgrade to read-only, then filesystem 2000 * activity can keep us busy forever, so we just fail with EBUSY. 
2001 */ 2002 if (loopcnt == 0) { 2003 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 2004 panic("softdep_flushfiles: looping"); 2005 error = EBUSY; 2006 } 2007 if (!error) 2008 error = softdep_waitidle(oldmnt, flags); 2009 if (!error) { 2010 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { 2011 retry = 0; 2012 MNT_ILOCK(oldmnt); 2013 KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0, 2014 ("softdep_flushfiles: !MNTK_NOINSMNTQ")); 2015 morework = oldmnt->mnt_nvnodelistsize > 0; 2016#ifdef QUOTA 2017 ump = VFSTOUFS(oldmnt); 2018 UFS_LOCK(ump); 2019 for (i = 0; i < MAXQUOTAS; i++) { 2020 if (ump->um_quotas[i] != NULLVP) 2021 morework = 1; 2022 } 2023 UFS_UNLOCK(ump); 2024#endif 2025 if (morework) { 2026 if (--retry_flush_count > 0) { 2027 retry = 1; 2028 loopcnt = 3; 2029 } else 2030 error = EBUSY; 2031 } 2032 MNT_IUNLOCK(oldmnt); 2033 if (retry) 2034 goto retry_flush; 2035 } 2036 } 2037 return (error); 2038} 2039 2040/* 2041 * Structure hashing. 2042 * 2043 * There are four types of structures that can be looked up: 2044 * 1) pagedep structures identified by mount point, inode number, 2045 * and logical block. 2046 * 2) inodedep structures identified by mount point and inode number. 2047 * 3) newblk structures identified by mount point and 2048 * physical block number. 2049 * 4) bmsafemap structures identified by mount point and 2050 * cylinder group number. 2051 * 2052 * The "pagedep" and "inodedep" dependency structures are hashed 2053 * separately from the file blocks and inodes to which they correspond. 2054 * This separation helps when the in-memory copy of an inode or 2055 * file block must be replaced. It also obviates the need to access 2056 * an inode or file page when simply updating (or de-allocating) 2057 * dependency structures. Lookup of newblk structures is needed to 2058 * find newly allocated blocks when trying to associate them with 2059 * their allocdirect or allocindir structure. 2060 * 2061 * The lookup routines optionally create and hash a new instance when 2062 * an existing entry is not found. The bmsafemap lookup routine always 2063 * allocates a new structure if an existing one is not found. 2064 */ 2065#define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 2066 2067/* 2068 * Structures and routines associated with pagedep caching. 2069 */ 2070#define PAGEDEP_HASH(ump, inum, lbn) \ 2071 (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size]) 2072 2073static int 2074pagedep_find(pagedephd, ino, lbn, pagedeppp) 2075 struct pagedep_hashhead *pagedephd; 2076 ino_t ino; 2077 ufs_lbn_t lbn; 2078 struct pagedep **pagedeppp; 2079{ 2080 struct pagedep *pagedep; 2081 2082 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 2083 if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) { 2084 *pagedeppp = pagedep; 2085 return (1); 2086 } 2087 } 2088 *pagedeppp = NULL; 2089 return (0); 2090} 2091/* 2092 * Look up a pagedep. Return 1 if found, 0 otherwise. 2093 * If not found, allocate if DEPALLOC flag is passed. 2094 * Found or allocated entry is returned in pagedeppp. 2095 * This routine must be called with splbio interrupts blocked. 
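 *
 * A typical lookup-or-create caller passes DEPALLOC and treats both
 * return values as success; a sketch with hypothetical locals, the
 * per-filesystem lock held as described above:
 *
 *	struct pagedep *pagedep;
 *
 *	if (pagedep_lookup(mp, bp, ip->i_number, lbn, DEPALLOC,
 *	    &pagedep) == 0) {
 *		... pagedep was newly allocated; its lists are empty ...
 *	}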
2096 */ 2097static int 2098pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) 2099 struct mount *mp; 2100 struct buf *bp; 2101 ino_t ino; 2102 ufs_lbn_t lbn; 2103 int flags; 2104 struct pagedep **pagedeppp; 2105{ 2106 struct pagedep *pagedep; 2107 struct pagedep_hashhead *pagedephd; 2108 struct worklist *wk; 2109 struct ufsmount *ump; 2110 int ret; 2111 int i; 2112 2113 ump = VFSTOUFS(mp); 2114 LOCK_OWNED(ump); 2115 if (bp) { 2116 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 2117 if (wk->wk_type == D_PAGEDEP) { 2118 *pagedeppp = WK_PAGEDEP(wk); 2119 return (1); 2120 } 2121 } 2122 } 2123 pagedephd = PAGEDEP_HASH(ump, ino, lbn); 2124 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2125 if (ret) { 2126 if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) 2127 WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); 2128 return (1); 2129 } 2130 if ((flags & DEPALLOC) == 0) 2131 return (0); 2132 FREE_LOCK(ump); 2133 pagedep = malloc(sizeof(struct pagedep), 2134 M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); 2135 workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); 2136 ACQUIRE_LOCK(ump); 2137 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2138 if (*pagedeppp) { 2139 /* 2140 * This should never happen since we only create pagedeps 2141 * with the vnode lock held. Could be an assert. 2142 */ 2143 WORKITEM_FREE(pagedep, D_PAGEDEP); 2144 return (ret); 2145 } 2146 pagedep->pd_ino = ino; 2147 pagedep->pd_lbn = lbn; 2148 LIST_INIT(&pagedep->pd_dirremhd); 2149 LIST_INIT(&pagedep->pd_pendinghd); 2150 for (i = 0; i < DAHASHSZ; i++) 2151 LIST_INIT(&pagedep->pd_diraddhd[i]); 2152 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 2153 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2154 *pagedeppp = pagedep; 2155 return (0); 2156} 2157 2158/* 2159 * Structures and routines associated with inodedep caching. 2160 */ 2161#define INODEDEP_HASH(ump, inum) \ 2162 (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size]) 2163 2164static int 2165inodedep_find(inodedephd, inum, inodedeppp) 2166 struct inodedep_hashhead *inodedephd; 2167 ino_t inum; 2168 struct inodedep **inodedeppp; 2169{ 2170 struct inodedep *inodedep; 2171 2172 LIST_FOREACH(inodedep, inodedephd, id_hash) 2173 if (inum == inodedep->id_ino) 2174 break; 2175 if (inodedep) { 2176 *inodedeppp = inodedep; 2177 return (1); 2178 } 2179 *inodedeppp = NULL; 2180 2181 return (0); 2182} 2183/* 2184 * Look up an inodedep. Return 1 if found, 0 if not found. 2185 * If not found, allocate if DEPALLOC flag is passed. 2186 * Found or allocated entry is returned in inodedeppp. 2187 * This routine must be called with splbio interrupts blocked. 2188 */ 2189static int 2190inodedep_lookup(mp, inum, flags, inodedeppp) 2191 struct mount *mp; 2192 ino_t inum; 2193 int flags; 2194 struct inodedep **inodedeppp; 2195{ 2196 struct inodedep *inodedep; 2197 struct inodedep_hashhead *inodedephd; 2198 struct ufsmount *ump; 2199 struct fs *fs; 2200 2201 ump = VFSTOUFS(mp); 2202 LOCK_OWNED(ump); 2203 fs = ump->um_fs; 2204 inodedephd = INODEDEP_HASH(ump, inum); 2205 2206 if (inodedep_find(inodedephd, inum, inodedeppp)) 2207 return (1); 2208 if ((flags & DEPALLOC) == 0) 2209 return (0); 2210 /* 2211 * If the system is over its limit and our filesystem is 2212 * responsible for more than our share of that usage and 2213 * we are not in a rush, request some inodedep cleanup. 
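 * The allocation below also illustrates the protocol shared by the
 * lookup routines in this file: the per-filesystem lock is dropped
 * around the sleeping malloc(), so another thread may install the
 * same entry first; the find is repeated once the lock is
 * reacquired, and the local allocation is discarded via
 * WORKITEM_FREE() if it lost that race.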
2214 */ 2215 if (softdep_excess_inodes(ump)) 2216 schedule_cleanup(mp); 2217 else 2218 FREE_LOCK(ump); 2219 inodedep = malloc(sizeof(struct inodedep), 2220 M_INODEDEP, M_SOFTDEP_FLAGS); 2221 workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); 2222 ACQUIRE_LOCK(ump); 2223 if (inodedep_find(inodedephd, inum, inodedeppp)) { 2224 WORKITEM_FREE(inodedep, D_INODEDEP); 2225 return (1); 2226 } 2227 inodedep->id_fs = fs; 2228 inodedep->id_ino = inum; 2229 inodedep->id_state = ALLCOMPLETE; 2230 inodedep->id_nlinkdelta = 0; 2231 inodedep->id_savedino1 = NULL; 2232 inodedep->id_savedsize = -1; 2233 inodedep->id_savedextsize = -1; 2234 inodedep->id_savednlink = -1; 2235 inodedep->id_bmsafemap = NULL; 2236 inodedep->id_mkdiradd = NULL; 2237 LIST_INIT(&inodedep->id_dirremhd); 2238 LIST_INIT(&inodedep->id_pendinghd); 2239 LIST_INIT(&inodedep->id_inowait); 2240 LIST_INIT(&inodedep->id_bufwait); 2241 TAILQ_INIT(&inodedep->id_inoreflst); 2242 TAILQ_INIT(&inodedep->id_inoupdt); 2243 TAILQ_INIT(&inodedep->id_newinoupdt); 2244 TAILQ_INIT(&inodedep->id_extupdt); 2245 TAILQ_INIT(&inodedep->id_newextupdt); 2246 TAILQ_INIT(&inodedep->id_freeblklst); 2247 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 2248 *inodedeppp = inodedep; 2249 return (0); 2250} 2251 2252/* 2253 * Structures and routines associated with newblk caching. 2254 */ 2255#define NEWBLK_HASH(ump, inum) \ 2256 (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size]) 2257 2258static int 2259newblk_find(newblkhd, newblkno, flags, newblkpp) 2260 struct newblk_hashhead *newblkhd; 2261 ufs2_daddr_t newblkno; 2262 int flags; 2263 struct newblk **newblkpp; 2264{ 2265 struct newblk *newblk; 2266 2267 LIST_FOREACH(newblk, newblkhd, nb_hash) { 2268 if (newblkno != newblk->nb_newblkno) 2269 continue; 2270 /* 2271 * If we're creating a new dependency don't match those that 2272 * have already been converted to allocdirects. This is for 2273 * a frag extend. 2274 */ 2275 if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) 2276 continue; 2277 break; 2278 } 2279 if (newblk) { 2280 *newblkpp = newblk; 2281 return (1); 2282 } 2283 *newblkpp = NULL; 2284 return (0); 2285} 2286 2287/* 2288 * Look up a newblk. Return 1 if found, 0 if not found. 2289 * If not found, allocate if DEPALLOC flag is passed. 2290 * Found or allocated entry is returned in newblkpp. 2291 */ 2292static int 2293newblk_lookup(mp, newblkno, flags, newblkpp) 2294 struct mount *mp; 2295 ufs2_daddr_t newblkno; 2296 int flags; 2297 struct newblk **newblkpp; 2298{ 2299 struct newblk *newblk; 2300 struct newblk_hashhead *newblkhd; 2301 struct ufsmount *ump; 2302 2303 ump = VFSTOUFS(mp); 2304 LOCK_OWNED(ump); 2305 newblkhd = NEWBLK_HASH(ump, newblkno); 2306 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) 2307 return (1); 2308 if ((flags & DEPALLOC) == 0) 2309 return (0); 2310 FREE_LOCK(ump); 2311 newblk = malloc(sizeof(union allblk), M_NEWBLK, 2312 M_SOFTDEP_FLAGS | M_ZERO); 2313 workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); 2314 ACQUIRE_LOCK(ump); 2315 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) { 2316 WORKITEM_FREE(newblk, D_NEWBLK); 2317 return (1); 2318 } 2319 newblk->nb_freefrag = NULL; 2320 LIST_INIT(&newblk->nb_indirdeps); 2321 LIST_INIT(&newblk->nb_newdirblk); 2322 LIST_INIT(&newblk->nb_jwork); 2323 newblk->nb_state = ATTACHED; 2324 newblk->nb_newblkno = newblkno; 2325 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 2326 *newblkpp = newblk; 2327 return (0); 2328} 2329 2330/* 2331 * Structures and routines associated with freed indirect block caching. 
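 * The table is indexed by masking the physical block number:
 * INDIR_HASH(ump, blkno) evaluates to
 * &ump->indir_hashtbl[blkno & ump->indir_hash_size], with
 * indir_hash_size being one less than a power of two, as set up in
 * softdep_mount() below.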
2332 */ 2333#define INDIR_HASH(ump, blkno) \ 2334 (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size]) 2335 2336/* 2337 * Lookup an indirect block in the indir hash table. The freework is 2338 * removed and potentially freed. The caller must do a blocking journal 2339 * write before writing to the blkno. 2340 */ 2341static int 2342indirblk_lookup(mp, blkno) 2343 struct mount *mp; 2344 ufs2_daddr_t blkno; 2345{ 2346 struct freework *freework; 2347 struct indir_hashhead *wkhd; 2348 struct ufsmount *ump; 2349 2350 ump = VFSTOUFS(mp); 2351 wkhd = INDIR_HASH(ump, blkno); 2352 TAILQ_FOREACH(freework, wkhd, fw_next) { 2353 if (freework->fw_blkno != blkno) 2354 continue; 2355 indirblk_remove(freework); 2356 return (1); 2357 } 2358 return (0); 2359} 2360 2361/* 2362 * Insert an indirect block represented by freework into the indirblk 2363 * hash table so that it may prevent the block from being re-used prior 2364 * to the journal being written. 2365 */ 2366static void 2367indirblk_insert(freework) 2368 struct freework *freework; 2369{ 2370 struct jblocks *jblocks; 2371 struct jseg *jseg; 2372 struct ufsmount *ump; 2373 2374 ump = VFSTOUFS(freework->fw_list.wk_mp); 2375 jblocks = ump->softdep_jblocks; 2376 jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); 2377 if (jseg == NULL) 2378 return; 2379 2380 LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); 2381 TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework, 2382 fw_next); 2383 freework->fw_state &= ~DEPCOMPLETE; 2384} 2385 2386static void 2387indirblk_remove(freework) 2388 struct freework *freework; 2389{ 2390 struct ufsmount *ump; 2391 2392 ump = VFSTOUFS(freework->fw_list.wk_mp); 2393 LIST_REMOVE(freework, fw_segs); 2394 TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); 2395 freework->fw_state |= DEPCOMPLETE; 2396 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 2397 WORKITEM_FREE(freework, D_FREEWORK); 2398} 2399 2400/* 2401 * Executed during filesystem system initialization before 2402 * mounting any filesystems. 2403 */ 2404void 2405softdep_initialize() 2406{ 2407 2408 TAILQ_INIT(&softdepmounts); 2409 max_softdeps = desiredvnodes * 4; 2410 2411 /* initialise bioops hack */ 2412 bioops.io_start = softdep_disk_io_initiation; 2413 bioops.io_complete = softdep_disk_write_complete; 2414 bioops.io_deallocate = softdep_deallocate_dependencies; 2415 bioops.io_countdeps = softdep_count_dependencies; 2416 softdep_ast_cleanup = softdep_ast_cleanup_proc; 2417 2418 /* Initialize the callout with an mtx. */ 2419 callout_init_mtx(&softdep_callout, &lk, 0); 2420} 2421 2422/* 2423 * Executed after all filesystems have been unmounted during 2424 * filesystem module unload. 2425 */ 2426void 2427softdep_uninitialize() 2428{ 2429 2430 /* clear bioops hack */ 2431 bioops.io_start = NULL; 2432 bioops.io_complete = NULL; 2433 bioops.io_deallocate = NULL; 2434 bioops.io_countdeps = NULL; 2435 softdep_ast_cleanup = NULL; 2436 2437 callout_drain(&softdep_callout); 2438} 2439 2440/* 2441 * Called at mount time to notify the dependency code that a 2442 * filesystem wishes to use it. 
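 *
 * (The hooks installed by softdep_initialize() above are reached
 * from the buffer cache through the global bioops table. Roughly,
 * and not the exact buf-layer code, each call site looks like:
 *
 *	if (bioops.io_start != NULL)
 *		(*bioops.io_start)(bp);
 *
 * and similarly for completion, deallocation, and dependency
 * counting.)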
2443 */ 2444int 2445softdep_mount(devvp, mp, fs, cred) 2446 struct vnode *devvp; 2447 struct mount *mp; 2448 struct fs *fs; 2449 struct ucred *cred; 2450{ 2451 struct csum_total cstotal; 2452 struct mount_softdeps *sdp; 2453 struct ufsmount *ump; 2454 struct cg *cgp; 2455 struct buf *bp; 2456 int i, error, cyl; 2457 2458 sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA, 2459 M_WAITOK | M_ZERO); 2460 MNT_ILOCK(mp); 2461 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; 2462 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { 2463 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | 2464 MNTK_SOFTDEP | MNTK_NOASYNC; 2465 } 2466 ump = VFSTOUFS(mp); 2467 ump->um_softdep = sdp; 2468 MNT_IUNLOCK(mp); 2469 rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock"); 2470 sdp->sd_ump = ump; 2471 LIST_INIT(&ump->softdep_workitem_pending); 2472 LIST_INIT(&ump->softdep_journal_pending); 2473 TAILQ_INIT(&ump->softdep_unlinked); 2474 LIST_INIT(&ump->softdep_dirtycg); 2475 ump->softdep_worklist_tail = NULL; 2476 ump->softdep_on_worklist = 0; 2477 ump->softdep_deps = 0; 2478 LIST_INIT(&ump->softdep_mkdirlisthd); 2479 ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 2480 &ump->pagedep_hash_size); 2481 ump->pagedep_nextclean = 0; 2482 ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, 2483 &ump->inodedep_hash_size); 2484 ump->inodedep_nextclean = 0; 2485 ump->newblk_hashtbl = hashinit(max_softdeps / 2, M_NEWBLK, 2486 &ump->newblk_hash_size); 2487 ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, 2488 &ump->bmsafemap_hash_size); 2489 i = 1 << (ffs(desiredvnodes / 10) - 1); 2490 ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead), 2491 M_FREEWORK, M_WAITOK); 2492 ump->indir_hash_size = i - 1; 2493 for (i = 0; i <= ump->indir_hash_size; i++) 2494 TAILQ_INIT(&ump->indir_hashtbl[i]); 2495 ACQUIRE_GBLLOCK(&lk); 2496 TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next); 2497 FREE_GBLLOCK(&lk); 2498 if ((fs->fs_flags & FS_SUJ) && 2499 (error = journal_mount(mp, fs, cred)) != 0) { 2500 printf("Failed to start journal: %d\n", error); 2501 softdep_unmount(mp); 2502 return (error); 2503 } 2504 /* 2505 * Start our flushing thread in the bufdaemon process. 2506 */ 2507 ACQUIRE_LOCK(ump); 2508 ump->softdep_flags |= FLUSH_STARTING; 2509 FREE_LOCK(ump); 2510 kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc, 2511 &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker", 2512 mp->mnt_stat.f_mntonname); 2513 ACQUIRE_LOCK(ump); 2514 while ((ump->softdep_flags & FLUSH_STARTING) != 0) { 2515 msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart", 2516 hz / 2); 2517 } 2518 FREE_LOCK(ump); 2519 /* 2520 * When doing soft updates, the counters in the 2521 * superblock may have gotten out of sync. Recomputation 2522 * can take a long time and can be deferred for background 2523 * fsck. However, the old behavior of scanning the cylinder 2524 * groups and recalculating them at mount time is available 2525 * by setting vfs.ffs.compute_summary_at_mount to one. 
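 *
 * For example, before mounting:
 *
 *	sysctl vfs.ffs.compute_summary_at_mount=1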
*/ 2527 if (compute_summary_at_mount == 0 || fs->fs_clean != 0) 2528 return (0); 2529 bzero(&cstotal, sizeof cstotal); 2530 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 2531 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 2532 fs->fs_cgsize, cred, &bp)) != 0) { 2533 brelse(bp); 2534 softdep_unmount(mp); 2535 return (error); 2536 } 2537 cgp = (struct cg *)bp->b_data; 2538 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 2539 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 2540 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 2541 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 2542 fs->fs_cs(fs, cyl) = cgp->cg_cs; 2543 brelse(bp); 2544 } 2545#ifdef DEBUG 2546 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 2547 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 2548#endif 2549 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 2550 return (0); 2551} 2552 2553void 2554softdep_unmount(mp) 2555 struct mount *mp; 2556{ 2557 struct ufsmount *ump; 2558#ifdef INVARIANTS 2559 int i; 2560#endif 2561 2562 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 2563 ("softdep_unmount called on non-softdep filesystem")); 2564 ump = VFSTOUFS(mp); 2565 MNT_ILOCK(mp); 2566 mp->mnt_flag &= ~MNT_SOFTDEP; 2567 if (MOUNTEDSUJ(mp) == 0) { 2568 MNT_IUNLOCK(mp); 2569 } else { 2570 mp->mnt_flag &= ~MNT_SUJ; 2571 MNT_IUNLOCK(mp); 2572 journal_unmount(ump); 2573 } 2574 /* 2575 * Shut down our flushing thread. The check for NULL is needed in 2576 * case softdep_mount errored out before the thread was created. 2577 */ 2578 if (ump->softdep_flushtd != NULL) { 2579 ACQUIRE_LOCK(ump); 2580 ump->softdep_flags |= FLUSH_EXIT; 2581 wakeup(&ump->softdep_flushtd); 2582 msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP, 2583 "sdwait", 0); 2584 KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0, 2585 ("Thread shutdown failed")); 2586 } 2587 /* 2588 * Free up our resources.
2589 */ 2590 ACQUIRE_GBLLOCK(&lk); 2591 TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next); 2592 FREE_GBLLOCK(&lk); 2593 rw_destroy(LOCK_PTR(ump)); 2594 hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size); 2595 hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size); 2596 hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size); 2597 hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP, 2598 ump->bmsafemap_hash_size); 2599 free(ump->indir_hashtbl, M_FREEWORK); 2600#ifdef INVARIANTS 2601 for (i = 0; i <= D_LAST; i++) 2602 KASSERT(ump->softdep_curdeps[i] == 0, 2603 ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt, 2604 TYPENAME(i), ump->softdep_curdeps[i])); 2605#endif 2606 free(ump->um_softdep, M_MOUNTDATA); 2607} 2608 2609static struct jblocks * 2610jblocks_create(void) 2611{ 2612 struct jblocks *jblocks; 2613 2614 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); 2615 TAILQ_INIT(&jblocks->jb_segs); 2616 jblocks->jb_avail = 10; 2617 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2618 M_JBLOCKS, M_WAITOK | M_ZERO); 2619 2620 return (jblocks); 2621} 2622 2623static ufs2_daddr_t 2624jblocks_alloc(jblocks, bytes, actual) 2625 struct jblocks *jblocks; 2626 int bytes; 2627 int *actual; 2628{ 2629 ufs2_daddr_t daddr; 2630 struct jextent *jext; 2631 int freecnt; 2632 int blocks; 2633 2634 blocks = bytes / DEV_BSIZE; 2635 jext = &jblocks->jb_extent[jblocks->jb_head]; 2636 freecnt = jext->je_blocks - jblocks->jb_off; 2637 if (freecnt == 0) { 2638 jblocks->jb_off = 0; 2639 if (++jblocks->jb_head > jblocks->jb_used) 2640 jblocks->jb_head = 0; 2641 jext = &jblocks->jb_extent[jblocks->jb_head]; 2642 freecnt = jext->je_blocks; 2643 } 2644 if (freecnt > blocks) 2645 freecnt = blocks; 2646 *actual = freecnt * DEV_BSIZE; 2647 daddr = jext->je_daddr + jblocks->jb_off; 2648 jblocks->jb_off += freecnt; 2649 jblocks->jb_free -= freecnt; 2650 2651 return (daddr); 2652} 2653 2654static void 2655jblocks_free(jblocks, mp, bytes) 2656 struct jblocks *jblocks; 2657 struct mount *mp; 2658 int bytes; 2659{ 2660 2661 LOCK_OWNED(VFSTOUFS(mp)); 2662 jblocks->jb_free += bytes / DEV_BSIZE; 2663 if (jblocks->jb_suspended) 2664 worklist_speedup(mp); 2665 wakeup(jblocks); 2666} 2667 2668static void 2669jblocks_destroy(jblocks) 2670 struct jblocks *jblocks; 2671{ 2672 2673 if (jblocks->jb_extent) 2674 free(jblocks->jb_extent, M_JBLOCKS); 2675 free(jblocks, M_JBLOCKS); 2676} 2677 2678static void 2679jblocks_add(jblocks, daddr, blocks) 2680 struct jblocks *jblocks; 2681 ufs2_daddr_t daddr; 2682 int blocks; 2683{ 2684 struct jextent *jext; 2685 2686 jblocks->jb_blocks += blocks; 2687 jblocks->jb_free += blocks; 2688 jext = &jblocks->jb_extent[jblocks->jb_used]; 2689 /* Adding the first block. */ 2690 if (jext->je_daddr == 0) { 2691 jext->je_daddr = daddr; 2692 jext->je_blocks = blocks; 2693 return; 2694 } 2695 /* Extending the last extent. */ 2696 if (jext->je_daddr + jext->je_blocks == daddr) { 2697 jext->je_blocks += blocks; 2698 return; 2699 } 2700 /* Adding a new extent. 
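 * A new extent is consumed only when the added range is not
 * contiguous with the previous one; the extent array below grows by
 * doubling, so repeated jblocks_add() calls stay cheap.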
*/ 2701 if (++jblocks->jb_used == jblocks->jb_avail) { 2702 jblocks->jb_avail *= 2; 2703 jext = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2704 M_JBLOCKS, M_WAITOK | M_ZERO); 2705 memcpy(jext, jblocks->jb_extent, 2706 sizeof(struct jextent) * jblocks->jb_used); 2707 free(jblocks->jb_extent, M_JBLOCKS); 2708 jblocks->jb_extent = jext; 2709 } 2710 jext = &jblocks->jb_extent[jblocks->jb_used]; 2711 jext->je_daddr = daddr; 2712 jext->je_blocks = blocks; 2713 return; 2714} 2715 2716int 2717softdep_journal_lookup(mp, vpp) 2718 struct mount *mp; 2719 struct vnode **vpp; 2720{ 2721 struct componentname cnp; 2722 struct vnode *dvp; 2723 ino_t sujournal; 2724 int error; 2725 2726 error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp); 2727 if (error) 2728 return (error); 2729 bzero(&cnp, sizeof(cnp)); 2730 cnp.cn_nameiop = LOOKUP; 2731 cnp.cn_flags = ISLASTCN; 2732 cnp.cn_thread = curthread; 2733 cnp.cn_cred = curthread->td_ucred; 2734 cnp.cn_pnbuf = SUJ_FILE; 2735 cnp.cn_nameptr = SUJ_FILE; 2736 cnp.cn_namelen = strlen(SUJ_FILE); 2737 error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal); 2738 vput(dvp); 2739 if (error != 0) 2740 return (error); 2741 error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp); 2742 return (error); 2743} 2744 2745/* 2746 * Open and verify the journal file. 2747 */ 2748static int 2749journal_mount(mp, fs, cred) 2750 struct mount *mp; 2751 struct fs *fs; 2752 struct ucred *cred; 2753{ 2754 struct jblocks *jblocks; 2755 struct ufsmount *ump; 2756 struct vnode *vp; 2757 struct inode *ip; 2758 ufs2_daddr_t blkno; 2759 int bcount; 2760 int error; 2761 int i; 2762 2763 ump = VFSTOUFS(mp); 2764 ump->softdep_journal_tail = NULL; 2765 ump->softdep_on_journal = 0; 2766 ump->softdep_accdeps = 0; 2767 ump->softdep_req = 0; 2768 ump->softdep_jblocks = NULL; 2769 error = softdep_journal_lookup(mp, &vp); 2770 if (error != 0) { 2771 printf("Failed to find journal. Use tunefs to create one\n"); 2772 return (error); 2773 } 2774 ip = VTOI(vp); 2775 if (ip->i_size < SUJ_MIN) { 2776 error = ENOSPC; 2777 goto out; 2778 } 2779 bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */ 2780 jblocks = jblocks_create(); 2781 for (i = 0; i < bcount; i++) { 2782 error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL); 2783 if (error) 2784 break; 2785 jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag)); 2786 } 2787 if (error) { 2788 jblocks_destroy(jblocks); 2789 goto out; 2790 } 2791 jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */ 2792 jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */ 2793 ump->softdep_jblocks = jblocks; 2794out: 2795 if (error == 0) { 2796 MNT_ILOCK(mp); 2797 mp->mnt_flag |= MNT_SUJ; 2798 mp->mnt_flag &= ~MNT_SOFTDEP; 2799 MNT_IUNLOCK(mp); 2800 /* 2801 * Only validate the journal contents if the 2802 * filesystem is clean, otherwise we write the logs 2803 * but they'll never be used. If the filesystem was 2804 * still dirty when we mounted it the journal is 2805 * invalid and a new journal can only be valid if it 2806 * starts from a clean mount. 2807 */ 2808 if (fs->fs_clean) { 2809 DIP_SET(ip, i_modrev, fs->fs_mtime); 2810 ip->i_flags |= IN_MODIFIED; 2811 ffs_update(vp, 1); 2812 } 2813 } 2814 vput(vp); 2815 return (error); 2816} 2817 2818static void 2819journal_unmount(ump) 2820 struct ufsmount *ump; 2821{ 2822 2823 if (ump->softdep_jblocks) 2824 jblocks_destroy(ump->softdep_jblocks); 2825 ump->softdep_jblocks = NULL; 2826} 2827 2828/* 2829 * Called when a journal record is ready to be written. 
Space is allocated 2830 * and the journal entry is created when the journal is flushed to stable 2831 * store. 2832 */ 2833static void 2834add_to_journal(wk) 2835 struct worklist *wk; 2836{ 2837 struct ufsmount *ump; 2838 2839 ump = VFSTOUFS(wk->wk_mp); 2840 LOCK_OWNED(ump); 2841 if (wk->wk_state & ONWORKLIST) 2842 panic("add_to_journal: %s(0x%X) already on list", 2843 TYPENAME(wk->wk_type), wk->wk_state); 2844 wk->wk_state |= ONWORKLIST | DEPCOMPLETE; 2845 if (LIST_EMPTY(&ump->softdep_journal_pending)) { 2846 ump->softdep_jblocks->jb_age = ticks; 2847 LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list); 2848 } else 2849 LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list); 2850 ump->softdep_journal_tail = wk; 2851 ump->softdep_on_journal += 1; 2852} 2853 2854/* 2855 * Remove an arbitrary item from the journal worklist, maintaining the tail 2856 * pointer. This happens when a new operation obviates the need to 2857 * journal an old operation. 2858 */ 2859static void 2860remove_from_journal(wk) 2861 struct worklist *wk; 2862{ 2863 struct ufsmount *ump; 2864 2865 ump = VFSTOUFS(wk->wk_mp); 2866 LOCK_OWNED(ump); 2867#ifdef SUJ_DEBUG 2868 { 2869 struct worklist *wkn; 2870 2871 LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list) 2872 if (wkn == wk) 2873 break; 2874 if (wkn == NULL) 2875 panic("remove_from_journal: %p is not in journal", wk); 2876 } 2877#endif 2878 /* 2879 * We emulate a TAILQ to save space in most structures which do not 2880 * require TAILQ semantics. Here we must update the tail position 2881 * when removing the tail which is not the final entry. This works 2882 * only if the worklist linkage is at the beginning of the structure. 2883 */ 2884 if (ump->softdep_journal_tail == wk) 2885 ump->softdep_journal_tail = 2886 (struct worklist *)wk->wk_list.le_prev; 2887 2888 WORKLIST_REMOVE(wk); 2889 ump->softdep_on_journal -= 1; 2890} 2891 2892/* 2893 * Check for journal space as well as dependency limits so the prelink 2894 * code can throttle both journaled and non-journaled filesystems. 2895 * Threshold is 0 for low and 1 for min. 2896 */ 2897static int 2898journal_space(ump, thresh) 2899 struct ufsmount *ump; 2900 int thresh; 2901{ 2902 struct jblocks *jblocks; 2903 int limit, avail; 2904 2905 jblocks = ump->softdep_jblocks; 2906 if (jblocks == NULL) 2907 return (1); 2908 /* 2909 * We use a tighter restriction here to prevent request_cleanup(), 2910 * running in other threads, from running into locks we currently hold. 2911 * We have to be over the limit and our filesystem has to be 2912 * responsible for more than our share of that usage.
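 *
 * As a worked example of the computation below, assuming the
 * 32-byte JREC_SIZE: a fresh 1MB journal has jb_free == 2048
 * 512-byte blocks, so journal_mount() above set jb_low == 682 and
 * jb_min == 204. With 1024 records pending, they reserve
 * 1024 * 32 / 512 == 64 blocks and avail == jb_free - 64. The
 * "6553 records" figure quoted before softdep_prealloc() below is
 * the same arithmetic: 20% of 1MB divided by 32 bytes per record.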
2913 */ 2914 limit = (max_softdeps / 10) * 9; 2915 if (dep_current[D_INODEDEP] > limit && 2916 ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads) 2917 return (0); 2918 if (thresh) 2919 thresh = jblocks->jb_min; 2920 else 2921 thresh = jblocks->jb_low; 2922 avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE; 2923 avail = jblocks->jb_free - avail; 2924 2925 return (avail > thresh); 2926} 2927 2928static void 2929journal_suspend(ump) 2930 struct ufsmount *ump; 2931{ 2932 struct jblocks *jblocks; 2933 struct mount *mp; 2934 2935 mp = UFSTOVFS(ump); 2936 jblocks = ump->softdep_jblocks; 2937 MNT_ILOCK(mp); 2938 if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 2939 stat_journal_min++; 2940 mp->mnt_kern_flag |= MNTK_SUSPEND; 2941 mp->mnt_susp_owner = ump->softdep_flushtd; 2942 } 2943 jblocks->jb_suspended = 1; 2944 MNT_IUNLOCK(mp); 2945} 2946 2947static int 2948journal_unsuspend(struct ufsmount *ump) 2949{ 2950 struct jblocks *jblocks; 2951 struct mount *mp; 2952 2953 mp = UFSTOVFS(ump); 2954 jblocks = ump->softdep_jblocks; 2955 2956 if (jblocks != NULL && jblocks->jb_suspended && 2957 journal_space(ump, jblocks->jb_min)) { 2958 jblocks->jb_suspended = 0; 2959 FREE_LOCK(ump); 2960 mp->mnt_susp_owner = curthread; 2961 vfs_write_resume(mp, 0); 2962 ACQUIRE_LOCK(ump); 2963 return (1); 2964 } 2965 return (0); 2966} 2967 2968/* 2969 * Called before any allocation function to be certain that there is 2970 * sufficient space in the journal prior to creating any new records. 2971 * Since in the case of block allocation we may have multiple locked 2972 * buffers at the time of the actual allocation we can not block 2973 * when the journal records are created. Doing so would create a deadlock 2974 * if any of these buffers needed to be flushed to reclaim space. Instead 2975 * we require a sufficiently large amount of available space such that 2976 * each thread in the system could have passed this allocation check and 2977 * still have sufficient free space. With 20% of a minimum journal size 2978 * of 1MB we have 6553 records available. 2979 */ 2980int 2981softdep_prealloc(vp, waitok) 2982 struct vnode *vp; 2983 int waitok; 2984{ 2985 struct ufsmount *ump; 2986 2987 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 2988 ("softdep_prealloc called on non-softdep filesystem")); 2989 /* 2990 * Nothing to do if we are not running journaled soft updates. 2991 * If we currently hold the snapshot lock, we must avoid handling 2992 * other resources that could cause deadlock. 2993 */ 2994 if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp))) 2995 return (0); 2996 ump = VFSTOUFS(vp->v_mount); 2997 ACQUIRE_LOCK(ump); 2998 if (journal_space(ump, 0)) { 2999 FREE_LOCK(ump); 3000 return (0); 3001 } 3002 stat_journal_low++; 3003 FREE_LOCK(ump); 3004 if (waitok == MNT_NOWAIT) 3005 return (ENOSPC); 3006 /* 3007 * Attempt to sync this vnode once to flush any journal 3008 * work attached to it. 3009 */ 3010 if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0) 3011 ffs_syncvnode(vp, waitok, 0); 3012 ACQUIRE_LOCK(ump); 3013 process_removes(vp); 3014 process_truncates(vp); 3015 if (journal_space(ump, 0) == 0) { 3016 softdep_speedup(ump); 3017 if (journal_space(ump, 1) == 0) 3018 journal_suspend(ump); 3019 } 3020 FREE_LOCK(ump); 3021 3022 return (0); 3023} 3024 3025/* 3026 * Before adjusting a link count on a vnode verify that we have sufficient 3027 * journal space. 
If not, process operations that depend on the currently 3028 * locked pair of vnodes to try to flush space as the syncer, buf daemon, 3029 * and softdep flush threads can not acquire these locks to reclaim space. 3030 */ 3031static void 3032softdep_prelink(dvp, vp) 3033 struct vnode *dvp; 3034 struct vnode *vp; 3035{ 3036 struct ufsmount *ump; 3037 3038 ump = VFSTOUFS(dvp->v_mount); 3039 LOCK_OWNED(ump); 3040 /* 3041 * Nothing to do if we have sufficient journal space. 3042 * If we currently hold the snapshot lock, we must avoid 3043 * handling other resources that could cause deadlock. 3044 */ 3045 if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp)))) 3046 return; 3047 stat_journal_low++; 3048 FREE_LOCK(ump); 3049 if (vp) 3050 ffs_syncvnode(vp, MNT_NOWAIT, 0); 3051 ffs_syncvnode(dvp, MNT_WAIT, 0); 3052 ACQUIRE_LOCK(ump); 3053 /* Process vp before dvp as it may create .. removes. */ 3054 if (vp) { 3055 process_removes(vp); 3056 process_truncates(vp); 3057 } 3058 process_removes(dvp); 3059 process_truncates(dvp); 3060 softdep_speedup(ump); 3061 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT); 3062 if (journal_space(ump, 0) == 0) { 3063 softdep_speedup(ump); 3064 if (journal_space(ump, 1) == 0) 3065 journal_suspend(ump); 3066 } 3067} 3068 3069static void 3070jseg_write(ump, jseg, data) 3071 struct ufsmount *ump; 3072 struct jseg *jseg; 3073 uint8_t *data; 3074{ 3075 struct jsegrec *rec; 3076 3077 rec = (struct jsegrec *)data; 3078 rec->jsr_seq = jseg->js_seq; 3079 rec->jsr_oldest = jseg->js_oldseq; 3080 rec->jsr_cnt = jseg->js_cnt; 3081 rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize; 3082 rec->jsr_crc = 0; 3083 rec->jsr_time = ump->um_fs->fs_mtime; 3084} 3085 3086static inline void 3087inoref_write(inoref, jseg, rec) 3088 struct inoref *inoref; 3089 struct jseg *jseg; 3090 struct jrefrec *rec; 3091{ 3092 3093 inoref->if_jsegdep->jd_seg = jseg; 3094 rec->jr_ino = inoref->if_ino; 3095 rec->jr_parent = inoref->if_parent; 3096 rec->jr_nlink = inoref->if_nlink; 3097 rec->jr_mode = inoref->if_mode; 3098 rec->jr_diroff = inoref->if_diroff; 3099} 3100 3101static void 3102jaddref_write(jaddref, jseg, data) 3103 struct jaddref *jaddref; 3104 struct jseg *jseg; 3105 uint8_t *data; 3106{ 3107 struct jrefrec *rec; 3108 3109 rec = (struct jrefrec *)data; 3110 rec->jr_op = JOP_ADDREF; 3111 inoref_write(&jaddref->ja_ref, jseg, rec); 3112} 3113 3114static void 3115jremref_write(jremref, jseg, data) 3116 struct jremref *jremref; 3117 struct jseg *jseg; 3118 uint8_t *data; 3119{ 3120 struct jrefrec *rec; 3121 3122 rec = (struct jrefrec *)data; 3123 rec->jr_op = JOP_REMREF; 3124 inoref_write(&jremref->jr_ref, jseg, rec); 3125} 3126 3127static void 3128jmvref_write(jmvref, jseg, data) 3129 struct jmvref *jmvref; 3130 struct jseg *jseg; 3131 uint8_t *data; 3132{ 3133 struct jmvrec *rec; 3134 3135 rec = (struct jmvrec *)data; 3136 rec->jm_op = JOP_MVREF; 3137 rec->jm_ino = jmvref->jm_ino; 3138 rec->jm_parent = jmvref->jm_parent; 3139 rec->jm_oldoff = jmvref->jm_oldoff; 3140 rec->jm_newoff = jmvref->jm_newoff; 3141} 3142 3143static void 3144jnewblk_write(jnewblk, jseg, data) 3145 struct jnewblk *jnewblk; 3146 struct jseg *jseg; 3147 uint8_t *data; 3148{ 3149 struct jblkrec *rec; 3150 3151 jnewblk->jn_jsegdep->jd_seg = jseg; 3152 rec = (struct jblkrec *)data; 3153 rec->jb_op = JOP_NEWBLK; 3154 rec->jb_ino = jnewblk->jn_ino; 3155 rec->jb_blkno = jnewblk->jn_blkno; 3156 rec->jb_lbn = jnewblk->jn_lbn; 3157 rec->jb_frags = jnewblk->jn_frags; 3158 rec->jb_oldfrags = jnewblk->jn_oldfrags; 
3159} 3160 3161static void 3162jfreeblk_write(jfreeblk, jseg, data) 3163 struct jfreeblk *jfreeblk; 3164 struct jseg *jseg; 3165 uint8_t *data; 3166{ 3167 struct jblkrec *rec; 3168 3169 jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg; 3170 rec = (struct jblkrec *)data; 3171 rec->jb_op = JOP_FREEBLK; 3172 rec->jb_ino = jfreeblk->jf_ino; 3173 rec->jb_blkno = jfreeblk->jf_blkno; 3174 rec->jb_lbn = jfreeblk->jf_lbn; 3175 rec->jb_frags = jfreeblk->jf_frags; 3176 rec->jb_oldfrags = 0; 3177} 3178 3179static void 3180jfreefrag_write(jfreefrag, jseg, data) 3181 struct jfreefrag *jfreefrag; 3182 struct jseg *jseg; 3183 uint8_t *data; 3184{ 3185 struct jblkrec *rec; 3186 3187 jfreefrag->fr_jsegdep->jd_seg = jseg; 3188 rec = (struct jblkrec *)data; 3189 rec->jb_op = JOP_FREEBLK; 3190 rec->jb_ino = jfreefrag->fr_ino; 3191 rec->jb_blkno = jfreefrag->fr_blkno; 3192 rec->jb_lbn = jfreefrag->fr_lbn; 3193 rec->jb_frags = jfreefrag->fr_frags; 3194 rec->jb_oldfrags = 0; 3195} 3196 3197static void 3198jtrunc_write(jtrunc, jseg, data) 3199 struct jtrunc *jtrunc; 3200 struct jseg *jseg; 3201 uint8_t *data; 3202{ 3203 struct jtrncrec *rec; 3204 3205 jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg; 3206 rec = (struct jtrncrec *)data; 3207 rec->jt_op = JOP_TRUNC; 3208 rec->jt_ino = jtrunc->jt_ino; 3209 rec->jt_size = jtrunc->jt_size; 3210 rec->jt_extsize = jtrunc->jt_extsize; 3211} 3212 3213static void 3214jfsync_write(jfsync, jseg, data) 3215 struct jfsync *jfsync; 3216 struct jseg *jseg; 3217 uint8_t *data; 3218{ 3219 struct jtrncrec *rec; 3220 3221 rec = (struct jtrncrec *)data; 3222 rec->jt_op = JOP_SYNC; 3223 rec->jt_ino = jfsync->jfs_ino; 3224 rec->jt_size = jfsync->jfs_size; 3225 rec->jt_extsize = jfsync->jfs_extsize; 3226} 3227 3228static void 3229softdep_flushjournal(mp) 3230 struct mount *mp; 3231{ 3232 struct jblocks *jblocks; 3233 struct ufsmount *ump; 3234 3235 if (MOUNTEDSUJ(mp) == 0) 3236 return; 3237 ump = VFSTOUFS(mp); 3238 jblocks = ump->softdep_jblocks; 3239 ACQUIRE_LOCK(ump); 3240 while (ump->softdep_on_journal) { 3241 jblocks->jb_needseg = 1; 3242 softdep_process_journal(mp, NULL, MNT_WAIT); 3243 } 3244 FREE_LOCK(ump); 3245} 3246 3247static void softdep_synchronize_completed(struct bio *); 3248static void softdep_synchronize(struct bio *, struct ufsmount *, void *); 3249 3250static void 3251softdep_synchronize_completed(bp) 3252 struct bio *bp; 3253{ 3254 struct jseg *oldest; 3255 struct jseg *jseg; 3256 struct ufsmount *ump; 3257 3258 /* 3259 * caller1 marks the last segment written before we issued the 3260 * synchronize cache. 3261 */ 3262 jseg = bp->bio_caller1; 3263 if (jseg == NULL) { 3264 g_destroy_bio(bp); 3265 return; 3266 } 3267 ump = VFSTOUFS(jseg->js_list.wk_mp); 3268 ACQUIRE_LOCK(ump); 3269 oldest = NULL; 3270 /* 3271 * Mark all the journal entries waiting on the synchronize cache 3272 * as completed so they may continue on. 3273 */ 3274 while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) { 3275 jseg->js_state |= COMPLETE; 3276 oldest = jseg; 3277 jseg = TAILQ_PREV(jseg, jseglst, js_next); 3278 } 3279 /* 3280 * Restart deferred journal entry processing from the oldest 3281 * completed jseg. 3282 */ 3283 if (oldest) 3284 complete_jsegs(oldest); 3285 3286 FREE_LOCK(ump); 3287 g_destroy_bio(bp); 3288} 3289 3290/* 3291 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering 3292 * barriers. The journal must be written prior to any blocks that depend 3293 * on it and the journal can not be released until the blocks have be 3294 * written. 
This code handles both barriers simultaneously. 3295 */ 3296static void 3297softdep_synchronize(bp, ump, caller1) 3298 struct bio *bp; 3299 struct ufsmount *ump; 3300 void *caller1; 3301{ 3302 3303 bp->bio_cmd = BIO_FLUSH; 3304 bp->bio_flags |= BIO_ORDERED; 3305 bp->bio_data = NULL; 3306 bp->bio_offset = ump->um_cp->provider->mediasize; 3307 bp->bio_length = 0; 3308 bp->bio_done = softdep_synchronize_completed; 3309 bp->bio_caller1 = caller1; 3310 g_io_request(bp, 3311 (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private); 3312} 3313 3314/* 3315 * Flush some journal records to disk. 3316 */ 3317static void 3318softdep_process_journal(mp, needwk, flags) 3319 struct mount *mp; 3320 struct worklist *needwk; 3321 int flags; 3322{ 3323 struct jblocks *jblocks; 3324 struct ufsmount *ump; 3325 struct worklist *wk; 3326 struct jseg *jseg; 3327 struct buf *bp; 3328 struct bio *bio; 3329 uint8_t *data; 3330 struct fs *fs; 3331 int shouldflush; 3332 int segwritten; 3333 int jrecmin; /* Minimum records per block. */ 3334 int jrecmax; /* Maximum records per block. */ 3335 int size; 3336 int cnt; 3337 int off; 3338 int devbsize; 3339 3340 if (MOUNTEDSUJ(mp) == 0) 3341 return; 3342 shouldflush = softdep_flushcache; 3343 bio = NULL; 3344 jseg = NULL; 3345 ump = VFSTOUFS(mp); 3346 LOCK_OWNED(ump); 3347 fs = ump->um_fs; 3348 jblocks = ump->softdep_jblocks; 3349 devbsize = ump->um_devvp->v_bufobj.bo_bsize; 3350 /* 3351 * We write anywhere between a disk block and fs block. The upper 3352 * bound is picked to prevent buffer cache fragmentation and limit 3353 * processing time per I/O. 3354 */ 3355 jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */ 3356 jrecmax = (fs->fs_bsize / devbsize) * jrecmin; 3357 segwritten = 0; 3358 for (;;) { 3359 cnt = ump->softdep_on_journal; 3360 /* 3361 * Criteria for writing a segment: 3362 * 1) We have a full block. 3363 * 2) We're called from jwait() and haven't found the 3364 * journal item yet. 3365 * 3) Always write if needseg is set. 3366 * 4) If we are called from process_worklist and have 3367 * not yet written anything we write a partial block 3368 * to enforce a 1 second maximum latency on journal 3369 * entries. 3370 */ 3371 if (cnt < (jrecmax - 1) && needwk == NULL && 3372 jblocks->jb_needseg == 0 && (segwritten || cnt == 0)) 3373 break; 3374 cnt++; 3375 /* 3376 * Verify some free journal space. softdep_prealloc() should 3377 * guarantee that we don't run out so this is indicative of 3378 * a problem with the flow control. Try to recover 3379 * gracefully in any event. 3380 */ 3381 while (jblocks->jb_free == 0) { 3382 if (flags != MNT_WAIT) 3383 break; 3384 printf("softdep: Out of journal space!\n"); 3385 softdep_speedup(ump); 3386 msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz); 3387 } 3388 FREE_LOCK(ump); 3389 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS); 3390 workitem_alloc(&jseg->js_list, D_JSEG, mp); 3391 LIST_INIT(&jseg->js_entries); 3392 LIST_INIT(&jseg->js_indirs); 3393 jseg->js_state = ATTACHED; 3394 if (shouldflush == 0) 3395 jseg->js_state |= COMPLETE; 3396 else if (bio == NULL) 3397 bio = g_alloc_bio(); 3398 jseg->js_jblocks = jblocks; 3399 bp = geteblk(fs->fs_bsize, 0); 3400 ACQUIRE_LOCK(ump); 3401 /* 3402 * If there was a race while we were allocating the block 3403 * and jseg the entry we care about was likely written. 3404 * We bail out in both the WAIT and NOWAIT case and assume 3405 * the caller will loop if the entry it cares about is 3406 * not written. 
3407 */ 3408 cnt = ump->softdep_on_journal; 3409 if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) { 3410 bp->b_flags |= B_INVAL | B_NOCACHE; 3411 WORKITEM_FREE(jseg, D_JSEG); 3412 FREE_LOCK(ump); 3413 brelse(bp); 3414 ACQUIRE_LOCK(ump); 3415 break; 3416 } 3417 /* 3418 * Calculate the disk block size required for the available 3419 * records rounded to the min size. 3420 */ 3421 if (cnt == 0) 3422 size = devbsize; 3423 else if (cnt < jrecmax) 3424 size = howmany(cnt, jrecmin) * devbsize; 3425 else 3426 size = fs->fs_bsize; 3427 /* 3428 * Allocate a disk block for this journal data and account 3429 * for truncation of the requested size if enough contiguous 3430 * space was not available. 3431 */ 3432 bp->b_blkno = jblocks_alloc(jblocks, size, &size); 3433 bp->b_lblkno = bp->b_blkno; 3434 bp->b_offset = bp->b_blkno * DEV_BSIZE; 3435 bp->b_bcount = size; 3436 bp->b_flags &= ~B_INVAL; 3437 bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY; 3438 /* 3439 * Initialize our jseg with cnt records. Assign the next 3440 * sequence number to it and link it in-order. 3441 */ 3442 cnt = MIN(cnt, (size / devbsize) * jrecmin); 3443 jseg->js_buf = bp; 3444 jseg->js_cnt = cnt; 3445 jseg->js_refs = cnt + 1; /* Self ref. */ 3446 jseg->js_size = size; 3447 jseg->js_seq = jblocks->jb_nextseq++; 3448 if (jblocks->jb_oldestseg == NULL) 3449 jblocks->jb_oldestseg = jseg; 3450 jseg->js_oldseq = jblocks->jb_oldestseg->js_seq; 3451 TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next); 3452 if (jblocks->jb_writeseg == NULL) 3453 jblocks->jb_writeseg = jseg; 3454 /* 3455 * Start filling in records from the pending list. 3456 */ 3457 data = bp->b_data; 3458 off = 0; 3459 3460 /* 3461 * Always put a header on the first block. 3462 * XXX As with below, there might not be a chance to get 3463 * into the loop. Ensure that something valid is written. 3464 */ 3465 jseg_write(ump, jseg, data); 3466 off += JREC_SIZE; 3467 data = bp->b_data + off; 3468 3469 /* 3470 * XXX Something is wrong here. There's no work to do, 3471 * but we need to perform and I/O and allow it to complete 3472 * anyways. 3473 */ 3474 if (LIST_EMPTY(&ump->softdep_journal_pending)) 3475 stat_emptyjblocks++; 3476 3477 while ((wk = LIST_FIRST(&ump->softdep_journal_pending)) 3478 != NULL) { 3479 if (cnt == 0) 3480 break; 3481 /* Place a segment header on every device block. 
*/ 3482 if ((off % devbsize) == 0) { 3483 jseg_write(ump, jseg, data); 3484 off += JREC_SIZE; 3485 data = bp->b_data + off; 3486 } 3487 if (wk == needwk) 3488 needwk = NULL; 3489 remove_from_journal(wk); 3490 wk->wk_state |= INPROGRESS; 3491 WORKLIST_INSERT(&jseg->js_entries, wk); 3492 switch (wk->wk_type) { 3493 case D_JADDREF: 3494 jaddref_write(WK_JADDREF(wk), jseg, data); 3495 break; 3496 case D_JREMREF: 3497 jremref_write(WK_JREMREF(wk), jseg, data); 3498 break; 3499 case D_JMVREF: 3500 jmvref_write(WK_JMVREF(wk), jseg, data); 3501 break; 3502 case D_JNEWBLK: 3503 jnewblk_write(WK_JNEWBLK(wk), jseg, data); 3504 break; 3505 case D_JFREEBLK: 3506 jfreeblk_write(WK_JFREEBLK(wk), jseg, data); 3507 break; 3508 case D_JFREEFRAG: 3509 jfreefrag_write(WK_JFREEFRAG(wk), jseg, data); 3510 break; 3511 case D_JTRUNC: 3512 jtrunc_write(WK_JTRUNC(wk), jseg, data); 3513 break; 3514 case D_JFSYNC: 3515 jfsync_write(WK_JFSYNC(wk), jseg, data); 3516 break; 3517 default: 3518 panic("process_journal: Unknown type %s", 3519 TYPENAME(wk->wk_type)); 3520 /* NOTREACHED */ 3521 } 3522 off += JREC_SIZE; 3523 data = bp->b_data + off; 3524 cnt--; 3525 } 3526 3527 /* Clear any remaining space so we don't leak kernel data */ 3528 if (size > off) 3529 bzero(data, size - off); 3530 3531 /* 3532 * Write this one buffer and continue. 3533 */ 3534 segwritten = 1; 3535 jblocks->jb_needseg = 0; 3536 WORKLIST_INSERT(&bp->b_dep, &jseg->js_list); 3537 FREE_LOCK(ump); 3538 pbgetvp(ump->um_devvp, bp); 3539 /* 3540 * We only do the blocking wait once we find the journal 3541 * entry we're looking for. 3542 */ 3543 if (needwk == NULL && flags == MNT_WAIT) 3544 bwrite(bp); 3545 else 3546 bawrite(bp); 3547 ACQUIRE_LOCK(ump); 3548 } 3549 /* 3550 * If we wrote a segment issue a synchronize cache so the journal 3551 * is reflected on disk before the data is written. Since reclaiming 3552 * journal space also requires writing a journal record this 3553 * process also enforces a barrier before reclamation. 3554 */ 3555 if (segwritten && shouldflush) { 3556 softdep_synchronize(bio, ump, 3557 TAILQ_LAST(&jblocks->jb_segs, jseglst)); 3558 } else if (bio) 3559 g_destroy_bio(bio); 3560 /* 3561 * If we've suspended the filesystem because we ran out of journal 3562 * space either try to sync it here to make some progress or 3563 * unsuspend it if we already have. 3564 */ 3565 if (flags == 0 && jblocks->jb_suspended) { 3566 if (journal_unsuspend(ump)) 3567 return; 3568 FREE_LOCK(ump); 3569 VFS_SYNC(mp, MNT_NOWAIT); 3570 ffs_sbupdate(ump, MNT_WAIT, 0); 3571 ACQUIRE_LOCK(ump); 3572 } 3573} 3574 3575/* 3576 * Complete a jseg, allowing all dependencies awaiting journal writes 3577 * to proceed. Each journal dependency also attaches a jsegdep to dependent 3578 * structures so that the journal segment can be freed to reclaim space. 
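 *
 * The jseg arrives here with js_refs == js_cnt + 1, set in
 * softdep_process_journal() above: one reference per record written
 * plus a self reference. Each record handled below surrenders one
 * reference, either directly through rele_jseg() for records with
 * no jsegdep or when the detached jsegdep is eventually freed, and
 * the rele_jseg() at the bottom drops the self reference.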
3579 */ 3580static void 3581complete_jseg(jseg) 3582 struct jseg *jseg; 3583{ 3584 struct worklist *wk; 3585 struct jmvref *jmvref; 3586 int waiting; 3587#ifdef INVARIANTS 3588 int i = 0; 3589#endif 3590 3591 while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) { 3592 WORKLIST_REMOVE(wk); 3593 waiting = wk->wk_state & IOWAITING; 3594 wk->wk_state &= ~(INPROGRESS | IOWAITING); 3595 wk->wk_state |= COMPLETE; 3596 KASSERT(i++ < jseg->js_cnt, 3597 ("handle_written_jseg: overflow %d >= %d", 3598 i - 1, jseg->js_cnt)); 3599 switch (wk->wk_type) { 3600 case D_JADDREF: 3601 handle_written_jaddref(WK_JADDREF(wk)); 3602 break; 3603 case D_JREMREF: 3604 handle_written_jremref(WK_JREMREF(wk)); 3605 break; 3606 case D_JMVREF: 3607 rele_jseg(jseg); /* No jsegdep. */ 3608 jmvref = WK_JMVREF(wk); 3609 LIST_REMOVE(jmvref, jm_deps); 3610 if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0) 3611 free_pagedep(jmvref->jm_pagedep); 3612 WORKITEM_FREE(jmvref, D_JMVREF); 3613 break; 3614 case D_JNEWBLK: 3615 handle_written_jnewblk(WK_JNEWBLK(wk)); 3616 break; 3617 case D_JFREEBLK: 3618 handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep); 3619 break; 3620 case D_JTRUNC: 3621 handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep); 3622 break; 3623 case D_JFSYNC: 3624 rele_jseg(jseg); /* No jsegdep. */ 3625 WORKITEM_FREE(wk, D_JFSYNC); 3626 break; 3627 case D_JFREEFRAG: 3628 handle_written_jfreefrag(WK_JFREEFRAG(wk)); 3629 break; 3630 default: 3631 panic("handle_written_jseg: Unknown type %s", 3632 TYPENAME(wk->wk_type)); 3633 /* NOTREACHED */ 3634 } 3635 if (waiting) 3636 wakeup(wk); 3637 } 3638 /* Release the self reference so the structure may be freed. */ 3639 rele_jseg(jseg); 3640} 3641 3642/* 3643 * Determine which jsegs are ready for completion processing. Waits for 3644 * synchronize cache to complete as well as forcing in-order completion 3645 * of journal entries. 3646 */ 3647static void 3648complete_jsegs(jseg) 3649 struct jseg *jseg; 3650{ 3651 struct jblocks *jblocks; 3652 struct jseg *jsegn; 3653 3654 jblocks = jseg->js_jblocks; 3655 /* 3656 * Don't allow out of order completions. If this isn't the first 3657 * block wait for it to write before we're done. 3658 */ 3659 if (jseg != jblocks->jb_writeseg) 3660 return; 3661 /* Iterate through available jsegs processing their entries. */ 3662 while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) { 3663 jblocks->jb_oldestwrseq = jseg->js_oldseq; 3664 jsegn = TAILQ_NEXT(jseg, js_next); 3665 complete_jseg(jseg); 3666 jseg = jsegn; 3667 } 3668 jblocks->jb_writeseg = jseg; 3669 /* 3670 * Attempt to free jsegs now that oldestwrseq may have advanced. 3671 */ 3672 free_jsegs(jblocks); 3673} 3674 3675/* 3676 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle 3677 * the final completions. 3678 */ 3679static void 3680handle_written_jseg(jseg, bp) 3681 struct jseg *jseg; 3682 struct buf *bp; 3683{ 3684 3685 if (jseg->js_refs == 0) 3686 panic("handle_written_jseg: No self-reference on %p", jseg); 3687 jseg->js_state |= DEPCOMPLETE; 3688 /* 3689 * We'll never need this buffer again, set flags so it will be 3690 * discarded. 3691 */ 3692 bp->b_flags |= B_INVAL | B_NOCACHE; 3693 pbrelvp(bp); 3694 complete_jsegs(jseg); 3695} 3696 3697static inline struct jsegdep * 3698inoref_jseg(inoref) 3699 struct inoref *inoref; 3700{ 3701 struct jsegdep *jsegdep; 3702 3703 jsegdep = inoref->if_jsegdep; 3704 inoref->if_jsegdep = NULL; 3705 3706 return (jsegdep); 3707} 3708 3709/* 3710 * Called once a jremref has made it to stable store. 
The jremref is marked 3711 * complete and we attempt to free it. Any pagedeps writes sleeping waiting 3712 * for the jremref to complete will be awoken by free_jremref. 3713 */ 3714static void 3715handle_written_jremref(jremref) 3716 struct jremref *jremref; 3717{ 3718 struct inodedep *inodedep; 3719 struct jsegdep *jsegdep; 3720 struct dirrem *dirrem; 3721 3722 /* Grab the jsegdep. */ 3723 jsegdep = inoref_jseg(&jremref->jr_ref); 3724 /* 3725 * Remove us from the inoref list. 3726 */ 3727 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 3728 0, &inodedep) == 0) 3729 panic("handle_written_jremref: Lost inodedep"); 3730 TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 3731 /* 3732 * Complete the dirrem. 3733 */ 3734 dirrem = jremref->jr_dirrem; 3735 jremref->jr_dirrem = NULL; 3736 LIST_REMOVE(jremref, jr_deps); 3737 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; 3738 jwork_insert(&dirrem->dm_jwork, jsegdep); 3739 if (LIST_EMPTY(&dirrem->dm_jremrefhd) && 3740 (dirrem->dm_state & COMPLETE) != 0) 3741 add_to_worklist(&dirrem->dm_list, 0); 3742 free_jremref(jremref); 3743} 3744 3745/* 3746 * Called once a jaddref has made it to stable store. The dependency is 3747 * marked complete and any dependent structures are added to the inode 3748 * bufwait list to be completed as soon as it is written. If a bitmap write 3749 * depends on this entry we move the inode into the inodedephd of the 3750 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. 3751 */ 3752static void 3753handle_written_jaddref(jaddref) 3754 struct jaddref *jaddref; 3755{ 3756 struct jsegdep *jsegdep; 3757 struct inodedep *inodedep; 3758 struct diradd *diradd; 3759 struct mkdir *mkdir; 3760 3761 /* Grab the jsegdep. */ 3762 jsegdep = inoref_jseg(&jaddref->ja_ref); 3763 mkdir = NULL; 3764 diradd = NULL; 3765 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 3766 0, &inodedep) == 0) 3767 panic("handle_written_jaddref: Lost inodedep."); 3768 if (jaddref->ja_diradd == NULL) 3769 panic("handle_written_jaddref: No dependency"); 3770 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { 3771 diradd = jaddref->ja_diradd; 3772 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); 3773 } else if (jaddref->ja_state & MKDIR_PARENT) { 3774 mkdir = jaddref->ja_mkdir; 3775 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); 3776 } else if (jaddref->ja_state & MKDIR_BODY) 3777 mkdir = jaddref->ja_mkdir; 3778 else 3779 panic("handle_written_jaddref: Unknown dependency %p", 3780 jaddref->ja_diradd); 3781 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ 3782 /* 3783 * Remove us from the inode list. 3784 */ 3785 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); 3786 /* 3787 * The mkdir may be waiting on the jaddref to clear before freeing. 3788 */ 3789 if (mkdir) { 3790 KASSERT(mkdir->md_list.wk_type == D_MKDIR, 3791 ("handle_written_jaddref: Incorrect type for mkdir %s", 3792 TYPENAME(mkdir->md_list.wk_type))); 3793 mkdir->md_jaddref = NULL; 3794 diradd = mkdir->md_diradd; 3795 mkdir->md_state |= DEPCOMPLETE; 3796 complete_mkdir(mkdir); 3797 } 3798 jwork_insert(&diradd->da_jwork, jsegdep); 3799 if (jaddref->ja_state & NEWBLOCK) { 3800 inodedep->id_state |= ONDEPLIST; 3801 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, 3802 inodedep, id_deps); 3803 } 3804 free_jaddref(jaddref); 3805} 3806 3807/* 3808 * Called once a jnewblk journal is written. 
The allocdirect or allocindir 3809 * is placed in the bmsafemap to await notification of a written bitmap. If 3810 * the operation was canceled we add the segdep to the appropriate 3811 * dependency to free the journal space once the canceling operation 3812 * completes. 3813 */ 3814static void 3815handle_written_jnewblk(jnewblk) 3816 struct jnewblk *jnewblk; 3817{ 3818 struct bmsafemap *bmsafemap; 3819 struct freefrag *freefrag; 3820 struct freework *freework; 3821 struct jsegdep *jsegdep; 3822 struct newblk *newblk; 3823 3824 /* Grab the jsegdep. */ 3825 jsegdep = jnewblk->jn_jsegdep; 3826 jnewblk->jn_jsegdep = NULL; 3827 if (jnewblk->jn_dep == NULL) 3828 panic("handle_written_jnewblk: No dependency for the segdep."); 3829 switch (jnewblk->jn_dep->wk_type) { 3830 case D_NEWBLK: 3831 case D_ALLOCDIRECT: 3832 case D_ALLOCINDIR: 3833 /* 3834 * Add the written block to the bmsafemap so it can 3835 * be notified when the bitmap is on disk. 3836 */ 3837 newblk = WK_NEWBLK(jnewblk->jn_dep); 3838 newblk->nb_jnewblk = NULL; 3839 if ((newblk->nb_state & GOINGAWAY) == 0) { 3840 bmsafemap = newblk->nb_bmsafemap; 3841 newblk->nb_state |= ONDEPLIST; 3842 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, 3843 nb_deps); 3844 } 3845 jwork_insert(&newblk->nb_jwork, jsegdep); 3846 break; 3847 case D_FREEFRAG: 3848 /* 3849 * A newblock being removed by a freefrag when replaced by 3850 * frag extension. 3851 */ 3852 freefrag = WK_FREEFRAG(jnewblk->jn_dep); 3853 freefrag->ff_jdep = NULL; 3854 jwork_insert(&freefrag->ff_jwork, jsegdep); 3855 break; 3856 case D_FREEWORK: 3857 /* 3858 * A direct block was removed by truncate. 3859 */ 3860 freework = WK_FREEWORK(jnewblk->jn_dep); 3861 freework->fw_jnewblk = NULL; 3862 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); 3863 break; 3864 default: 3865 panic("handle_written_jnewblk: Unknown type %d.", 3866 jnewblk->jn_dep->wk_type); 3867 } 3868 jnewblk->jn_dep = NULL; 3869 free_jnewblk(jnewblk); 3870} 3871 3872/* 3873 * Cancel a jfreefrag that won't be needed, probably due to colliding with 3874 * an in-flight allocation that has not yet been committed. Divorce us 3875 * from the freefrag and mark it DEPCOMPLETE so that it may be added 3876 * to the worklist. 3877 */ 3878static void 3879cancel_jfreefrag(jfreefrag) 3880 struct jfreefrag *jfreefrag; 3881{ 3882 struct freefrag *freefrag; 3883 3884 if (jfreefrag->fr_jsegdep) { 3885 free_jsegdep(jfreefrag->fr_jsegdep); 3886 jfreefrag->fr_jsegdep = NULL; 3887 } 3888 freefrag = jfreefrag->fr_freefrag; 3889 jfreefrag->fr_freefrag = NULL; 3890 free_jfreefrag(jfreefrag); 3891 freefrag->ff_state |= DEPCOMPLETE; 3892 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); 3893} 3894 3895/* 3896 * Free a jfreefrag when the parent freefrag is rendered obsolete. 3897 */ 3898static void 3899free_jfreefrag(jfreefrag) 3900 struct jfreefrag *jfreefrag; 3901{ 3902 3903 if (jfreefrag->fr_state & INPROGRESS) 3904 WORKLIST_REMOVE(&jfreefrag->fr_list); 3905 else if (jfreefrag->fr_state & ONWORKLIST) 3906 remove_from_journal(&jfreefrag->fr_list); 3907 if (jfreefrag->fr_freefrag != NULL) 3908 panic("free_jfreefrag: Still attached to a freefrag."); 3909 WORKITEM_FREE(jfreefrag, D_JFREEFRAG); 3910} 3911 3912/* 3913 * Called when the journal write for a jfreefrag completes. The parent 3914 * freefrag is added to the worklist if this completes its dependencies. 
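 *
 * Like the other handle_written_*() routines above, the shape is:
 * detach the jsegdep from the journal record, move it onto the
 * dependent structure's jwork list, then free the record itself, so
 * the journal space is reclaimed only after the dependent structure
 * completes.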
3915 */ 3916static void 3917handle_written_jfreefrag(jfreefrag) 3918 struct jfreefrag *jfreefrag; 3919{ 3920 struct jsegdep *jsegdep; 3921 struct freefrag *freefrag; 3922 3923 /* Grab the jsegdep. */ 3924 jsegdep = jfreefrag->fr_jsegdep; 3925 jfreefrag->fr_jsegdep = NULL; 3926 freefrag = jfreefrag->fr_freefrag; 3927 if (freefrag == NULL) 3928 panic("handle_written_jfreefrag: No freefrag."); 3929 freefrag->ff_state |= DEPCOMPLETE; 3930 freefrag->ff_jdep = NULL; 3931 jwork_insert(&freefrag->ff_jwork, jsegdep); 3932 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 3933 add_to_worklist(&freefrag->ff_list, 0); 3934 jfreefrag->fr_freefrag = NULL; 3935 free_jfreefrag(jfreefrag); 3936} 3937 3938/* 3939 * Called when the journal write for a jfreeblk completes. The jfreeblk 3940 * is removed from the freeblks list of pending journal writes and the 3941 * jsegdep is moved to the freeblks jwork to be completed when all blocks 3942 * have been reclaimed. 3943 */ 3944static void 3945handle_written_jblkdep(jblkdep) 3946 struct jblkdep *jblkdep; 3947{ 3948 struct freeblks *freeblks; 3949 struct jsegdep *jsegdep; 3950 3951 /* Grab the jsegdep. */ 3952 jsegdep = jblkdep->jb_jsegdep; 3953 jblkdep->jb_jsegdep = NULL; 3954 freeblks = jblkdep->jb_freeblks; 3955 LIST_REMOVE(jblkdep, jb_deps); 3956 jwork_insert(&freeblks->fb_jwork, jsegdep); 3957 /* 3958 * If the freeblks is all journaled, we can add it to the worklist. 3959 */ 3960 if (LIST_EMPTY(&freeblks->fb_jblkdephd) && 3961 (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 3962 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 3963 3964 free_jblkdep(jblkdep); 3965} 3966 3967static struct jsegdep * 3968newjsegdep(struct worklist *wk) 3969{ 3970 struct jsegdep *jsegdep; 3971 3972 jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS); 3973 workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp); 3974 jsegdep->jd_seg = NULL; 3975 3976 return (jsegdep); 3977} 3978 3979static struct jmvref * 3980newjmvref(dp, ino, oldoff, newoff) 3981 struct inode *dp; 3982 ino_t ino; 3983 off_t oldoff; 3984 off_t newoff; 3985{ 3986 struct jmvref *jmvref; 3987 3988 jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS); 3989 workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump)); 3990 jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE; 3991 jmvref->jm_parent = dp->i_number; 3992 jmvref->jm_ino = ino; 3993 jmvref->jm_oldoff = oldoff; 3994 jmvref->jm_newoff = newoff; 3995 3996 return (jmvref); 3997} 3998 3999/* 4000 * Allocate a new jremref that tracks the removal of ip from dp with the 4001 * directory entry offset of diroff. Mark the entry as ATTACHED and 4002 * DEPCOMPLETE as we have all the information required for the journal write 4003 * and the directory has already been removed from the buffer. The caller 4004 * is responsible for linking the jremref into the pagedep and adding it 4005 * to the journal to write. The MKDIR_PARENT flag is set if we're doing 4006 * a DOTDOT addition so handle_workitem_remove() can properly assign 4007 * the jsegdep when we're done. 
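 *
 * A removal path might use this roughly as follows (a sketch only; the
 * actual call sites are the directory removal routines elsewhere in
 * this file, and the argument values shown are illustrative):
 *
 *	jremref = newjremref(dirrem, dp, ip, dp->i_offset,
 *	    ip->i_effnlink - 1);
 *	jremref->jr_state |= MKDIR_PARENT;	(DOTDOT removal only)
 *	... link into the pagedep, then add_to_journal(&jremref->jr_list) ...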
4008 */ 4009static struct jremref * 4010newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip, 4011 off_t diroff, nlink_t nlink) 4012{ 4013 struct jremref *jremref; 4014 4015 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS); 4016 workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump)); 4017 jremref->jr_state = ATTACHED; 4018 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff, 4019 nlink, ip->i_mode); 4020 jremref->jr_dirrem = dirrem; 4021 4022 return (jremref); 4023} 4024 4025static inline void 4026newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff, 4027 nlink_t nlink, uint16_t mode) 4028{ 4029 4030 inoref->if_jsegdep = newjsegdep(&inoref->if_list); 4031 inoref->if_diroff = diroff; 4032 inoref->if_ino = ino; 4033 inoref->if_parent = parent; 4034 inoref->if_nlink = nlink; 4035 inoref->if_mode = mode; 4036} 4037 4038/* 4039 * Allocate a new jaddref to track the addition of ino to dp at diroff. The 4040 * directory offset may not be known until later. The caller is responsible 4041 * for adding the entry to the journal when this information is available. nlink 4042 * should be the link count prior to the addition and mode is only required 4043 * to have the correct FMT. 4044 */ 4045static struct jaddref * 4046newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink, 4047 uint16_t mode) 4048{ 4049 struct jaddref *jaddref; 4050 4051 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS); 4052 workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump)); 4053 jaddref->ja_state = ATTACHED; 4054 jaddref->ja_mkdir = NULL; 4055 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode); 4056 4057 return (jaddref); 4058} 4059 4060/* 4061 * Create a new free dependency for a freework. The caller is responsible 4062 * for adjusting the reference count when it has the lock held. The freedep 4063 * will track an outstanding bitmap write that will ultimately clear the 4064 * freework to continue. 4065 */ 4066static struct freedep * 4067newfreedep(struct freework *freework) 4068{ 4069 struct freedep *freedep; 4070 4071 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS); 4072 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp); 4073 freedep->fd_freework = freework; 4074 4075 return (freedep); 4076} 4077 4078/* 4079 * Free a freedep structure once the buffer it is linked to is written. If 4080 * this is the last reference to the freework, schedule it for completion. 4081 */ 4082static void 4083free_freedep(freedep) 4084 struct freedep *freedep; 4085{ 4086 struct freework *freework; 4087 4088 freework = freedep->fd_freework; 4089 freework->fw_freeblks->fb_cgwait--; 4090 if (--freework->fw_ref == 0) 4091 freework_enqueue(freework); 4092 WORKITEM_FREE(freedep, D_FREEDEP); 4093} 4094 4095/* 4096 * Allocate a new freework structure that may be a level in an indirect 4097 * when parent is not NULL or a top level block when it is. The top level 4098 * freework structures are allocated without the per-filesystem lock held 4099 * and before the freeblks is visible outside of softdep_setup_freeblocks().
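 *
 * The two call patterns look roughly like this (a sketch; the argument
 * values are illustrative):
 *
 *	freework = newfreework(ump, freeblks, NULL, lbn, nb, frags, 0, 1);
 *		(top level pointer, journaled, linked onto fb_freeworkhd)
 *	freework = newfreework(ump, freeblks, parent, lbn, nb, 0, off, 0);
 *		(one level of an indirect chain found during truncation)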
4100 */ 4101static struct freework * 4102newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) 4103 struct ufsmount *ump; 4104 struct freeblks *freeblks; 4105 struct freework *parent; 4106 ufs_lbn_t lbn; 4107 ufs2_daddr_t nb; 4108 int frags; 4109 int off; 4110 int journal; 4111{ 4112 struct freework *freework; 4113 4114 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); 4115 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); 4116 freework->fw_state = ATTACHED; 4117 freework->fw_jnewblk = NULL; 4118 freework->fw_freeblks = freeblks; 4119 freework->fw_parent = parent; 4120 freework->fw_lbn = lbn; 4121 freework->fw_blkno = nb; 4122 freework->fw_frags = frags; 4123 freework->fw_indir = NULL; 4124 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR) 4125 ? 0 : NINDIR(ump->um_fs) + 1; 4126 freework->fw_start = freework->fw_off = off; 4127 if (journal) 4128 newjfreeblk(freeblks, lbn, nb, frags); 4129 if (parent == NULL) { 4130 ACQUIRE_LOCK(ump); 4131 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 4132 freeblks->fb_ref++; 4133 FREE_LOCK(ump); 4134 } 4135 4136 return (freework); 4137} 4138 4139/* 4140 * Eliminate a jfreeblk for a block that does not need journaling. 4141 */ 4142static void 4143cancel_jfreeblk(freeblks, blkno) 4144 struct freeblks *freeblks; 4145 ufs2_daddr_t blkno; 4146{ 4147 struct jfreeblk *jfreeblk; 4148 struct jblkdep *jblkdep; 4149 4150 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { 4151 if (jblkdep->jb_list.wk_type != D_JFREEBLK) 4152 continue; 4153 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); 4154 if (jfreeblk->jf_blkno == blkno) 4155 break; 4156 } 4157 if (jblkdep == NULL) 4158 return; 4159 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); 4160 free_jsegdep(jblkdep->jb_jsegdep); 4161 LIST_REMOVE(jblkdep, jb_deps); 4162 WORKITEM_FREE(jfreeblk, D_JFREEBLK); 4163} 4164 4165/* 4166 * Allocate a new jfreeblk to journal top level block pointer when truncating 4167 * a file. The caller must add this to the worklist when the per-filesystem 4168 * lock is held. 4169 */ 4170static struct jfreeblk * 4171newjfreeblk(freeblks, lbn, blkno, frags) 4172 struct freeblks *freeblks; 4173 ufs_lbn_t lbn; 4174 ufs2_daddr_t blkno; 4175 int frags; 4176{ 4177 struct jfreeblk *jfreeblk; 4178 4179 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); 4180 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, 4181 freeblks->fb_list.wk_mp); 4182 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); 4183 jfreeblk->jf_dep.jb_freeblks = freeblks; 4184 jfreeblk->jf_ino = freeblks->fb_inum; 4185 jfreeblk->jf_lbn = lbn; 4186 jfreeblk->jf_blkno = blkno; 4187 jfreeblk->jf_frags = frags; 4188 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); 4189 4190 return (jfreeblk); 4191} 4192 4193/* 4194 * The journal is only prepared to handle full-size block numbers, so we 4195 * have to adjust the record to reflect the change to a full-size block. 4196 * For example, suppose we have a block made up of fragments 8-15 and 4197 * want to free its last two fragments. We are given a request that says: 4198 * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0 4199 * where frags are the number of fragments to free and oldfrags are the 4200 * number of fragments to keep. 
To block align it, we have to change it to 4201 * have a valid full-size blkno, so it becomes: 4202 * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6 4203 */ 4204static void 4205adjust_newfreework(freeblks, frag_offset) 4206 struct freeblks *freeblks; 4207 int frag_offset; 4208{ 4209 struct jfreeblk *jfreeblk; 4210 4211 KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL && 4212 LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK), 4213 ("adjust_newfreework: Missing freeblks dependency")); 4214 4215 jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd)); 4216 jfreeblk->jf_blkno -= frag_offset; 4217 jfreeblk->jf_frags += frag_offset; 4218} 4219 4220/* 4221 * Allocate a new jtrunc to track a partial truncation. 4222 */ 4223static struct jtrunc * 4224newjtrunc(freeblks, size, extsize) 4225 struct freeblks *freeblks; 4226 off_t size; 4227 int extsize; 4228{ 4229 struct jtrunc *jtrunc; 4230 4231 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); 4232 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, 4233 freeblks->fb_list.wk_mp); 4234 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); 4235 jtrunc->jt_dep.jb_freeblks = freeblks; 4236 jtrunc->jt_ino = freeblks->fb_inum; 4237 jtrunc->jt_size = size; 4238 jtrunc->jt_extsize = extsize; 4239 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); 4240 4241 return (jtrunc); 4242} 4243 4244/* 4245 * If we're canceling a new bitmap we have to search for another ref 4246 * to move into the bmsafemap dep. This might be better expressed 4247 * with another structure. 4248 */ 4249static void 4250move_newblock_dep(jaddref, inodedep) 4251 struct jaddref *jaddref; 4252 struct inodedep *inodedep; 4253{ 4254 struct inoref *inoref; 4255 struct jaddref *jaddrefn; 4256 4257 jaddrefn = NULL; 4258 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4259 inoref = TAILQ_NEXT(inoref, if_deps)) { 4260 if ((jaddref->ja_state & NEWBLOCK) && 4261 inoref->if_list.wk_type == D_JADDREF) { 4262 jaddrefn = (struct jaddref *)inoref; 4263 break; 4264 } 4265 } 4266 if (jaddrefn == NULL) 4267 return; 4268 jaddrefn->ja_state &= ~(ATTACHED | UNDONE); 4269 jaddrefn->ja_state |= jaddref->ja_state & 4270 (ATTACHED | UNDONE | NEWBLOCK); 4271 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); 4272 jaddref->ja_state |= ATTACHED; 4273 LIST_REMOVE(jaddref, ja_bmdeps); 4274 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, 4275 ja_bmdeps); 4276} 4277 4278/* 4279 * Cancel a jaddref either before it has been written or while it is being 4280 * written. This happens when a link is removed before the add reaches 4281 * the disk. The jaddref dependency is kept linked into the bmsafemap 4282 * and inode to prevent the link count or bitmap from reaching the disk 4283 * until handle_workitem_remove() re-adjusts the counts and bitmaps as 4284 * required. 4285 * 4286 * Returns 1 if the canceled addref requires journaling of the remove and 4287 * 0 otherwise. 
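 *
 * The nlink adjustment below can be pictured with illustrative values:
 * if the inoreflst holds pending refs recorded with if_nlink 2, 3 and 4
 * and the ref recorded with 2 is canceled before reaching the journal,
 * the refs that follow it are decremented to 2 and 3 so that any
 * rollback still sees a consistent link count progression.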
4288 */ 4289static int 4290cancel_jaddref(jaddref, inodedep, wkhd) 4291 struct jaddref *jaddref; 4292 struct inodedep *inodedep; 4293 struct workhead *wkhd; 4294{ 4295 struct inoref *inoref; 4296 struct jsegdep *jsegdep; 4297 int needsj; 4298 4299 KASSERT((jaddref->ja_state & COMPLETE) == 0, 4300 ("cancel_jaddref: Canceling complete jaddref")); 4301 if (jaddref->ja_state & (INPROGRESS | COMPLETE)) 4302 needsj = 1; 4303 else 4304 needsj = 0; 4305 if (inodedep == NULL) 4306 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 4307 0, &inodedep) == 0) 4308 panic("cancel_jaddref: Lost inodedep"); 4309 /* 4310 * We must adjust the nlink of any reference operation that follows 4311 * us so that it is consistent with the in-memory reference. This 4312 * ensures that inode nlink rollbacks always have the correct link. 4313 */ 4314 if (needsj == 0) { 4315 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4316 inoref = TAILQ_NEXT(inoref, if_deps)) { 4317 if (inoref->if_state & GOINGAWAY) 4318 break; 4319 inoref->if_nlink--; 4320 } 4321 } 4322 jsegdep = inoref_jseg(&jaddref->ja_ref); 4323 if (jaddref->ja_state & NEWBLOCK) 4324 move_newblock_dep(jaddref, inodedep); 4325 wake_worklist(&jaddref->ja_list); 4326 jaddref->ja_mkdir = NULL; 4327 if (jaddref->ja_state & INPROGRESS) { 4328 jaddref->ja_state &= ~INPROGRESS; 4329 WORKLIST_REMOVE(&jaddref->ja_list); 4330 jwork_insert(wkhd, jsegdep); 4331 } else { 4332 free_jsegdep(jsegdep); 4333 if (jaddref->ja_state & DEPCOMPLETE) 4334 remove_from_journal(&jaddref->ja_list); 4335 } 4336 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); 4337 /* 4338 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove 4339 * can arrange for them to be freed with the bitmap. Otherwise we 4340 * no longer need this addref attached to the inoreflst and it 4341 * will incorrectly adjust nlink if we leave it. 4342 */ 4343 if ((jaddref->ja_state & NEWBLOCK) == 0) { 4344 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 4345 if_deps); 4346 jaddref->ja_state |= COMPLETE; 4347 free_jaddref(jaddref); 4348 return (needsj); 4349 } 4350 /* 4351 * Leave the head of the list for jsegdeps for fast merging. 4352 */ 4353 if (LIST_FIRST(wkhd) != NULL) { 4354 jaddref->ja_state |= ONWORKLIST; 4355 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); 4356 } else 4357 WORKLIST_INSERT(wkhd, &jaddref->ja_list); 4358 4359 return (needsj); 4360} 4361 4362/* 4363 * Attempt to free a jaddref structure when some work completes. This 4364 * should only succeed once the entry is written and all dependencies have 4365 * been notified. 4366 */ 4367static void 4368free_jaddref(jaddref) 4369 struct jaddref *jaddref; 4370{ 4371 4372 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) 4373 return; 4374 if (jaddref->ja_ref.if_jsegdep) 4375 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", 4376 jaddref, jaddref->ja_state); 4377 if (jaddref->ja_state & NEWBLOCK) 4378 LIST_REMOVE(jaddref, ja_bmdeps); 4379 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) 4380 panic("free_jaddref: Bad state %p(0x%X)", 4381 jaddref, jaddref->ja_state); 4382 if (jaddref->ja_mkdir != NULL) 4383 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); 4384 WORKITEM_FREE(jaddref, D_JADDREF); 4385} 4386 4387/* 4388 * Free a jremref structure once it has been written or discarded. 
4389 */ 4390static void 4391free_jremref(jremref) 4392 struct jremref *jremref; 4393{ 4394 4395 if (jremref->jr_ref.if_jsegdep) 4396 free_jsegdep(jremref->jr_ref.if_jsegdep); 4397 if (jremref->jr_state & INPROGRESS) 4398 panic("free_jremref: IO still pending"); 4399 WORKITEM_FREE(jremref, D_JREMREF); 4400} 4401 4402/* 4403 * Free a jnewblk structure. 4404 */ 4405static void 4406free_jnewblk(jnewblk) 4407 struct jnewblk *jnewblk; 4408{ 4409 4410 if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE) 4411 return; 4412 LIST_REMOVE(jnewblk, jn_deps); 4413 if (jnewblk->jn_dep != NULL) 4414 panic("free_jnewblk: Dependency still attached."); 4415 WORKITEM_FREE(jnewblk, D_JNEWBLK); 4416} 4417 4418/* 4419 * Cancel a jnewblk which has been made redundant by frag extension. 4420 */ 4421static void 4422cancel_jnewblk(jnewblk, wkhd) 4423 struct jnewblk *jnewblk; 4424 struct workhead *wkhd; 4425{ 4426 struct jsegdep *jsegdep; 4427 4428 CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno); 4429 jsegdep = jnewblk->jn_jsegdep; 4430 if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL) 4431 panic("cancel_jnewblk: Invalid state"); 4432 jnewblk->jn_jsegdep = NULL; 4433 jnewblk->jn_dep = NULL; 4434 jnewblk->jn_state |= GOINGAWAY; 4435 if (jnewblk->jn_state & INPROGRESS) { 4436 jnewblk->jn_state &= ~INPROGRESS; 4437 WORKLIST_REMOVE(&jnewblk->jn_list); 4438 jwork_insert(wkhd, jsegdep); 4439 } else { 4440 free_jsegdep(jsegdep); 4441 remove_from_journal(&jnewblk->jn_list); 4442 } 4443 wake_worklist(&jnewblk->jn_list); 4444 WORKLIST_INSERT(wkhd, &jnewblk->jn_list); 4445} 4446 4447static void 4448free_jblkdep(jblkdep) 4449 struct jblkdep *jblkdep; 4450{ 4451 4452 if (jblkdep->jb_list.wk_type == D_JFREEBLK) 4453 WORKITEM_FREE(jblkdep, D_JFREEBLK); 4454 else if (jblkdep->jb_list.wk_type == D_JTRUNC) 4455 WORKITEM_FREE(jblkdep, D_JTRUNC); 4456 else 4457 panic("free_jblkdep: Unexpected type %s", 4458 TYPENAME(jblkdep->jb_list.wk_type)); 4459} 4460 4461/* 4462 * Free a single jseg once it is no longer referenced in memory or on 4463 * disk. Reclaim journal blocks and dependencies waiting for the segment 4464 * to disappear. 4465 */ 4466static void 4467free_jseg(jseg, jblocks) 4468 struct jseg *jseg; 4469 struct jblocks *jblocks; 4470{ 4471 struct freework *freework; 4472 4473 /* 4474 * Free freework structures that were lingering to indicate freed 4475 * indirect blocks that forced journal write ordering on reallocate. 4476 */ 4477 while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL) 4478 indirblk_remove(freework); 4479 if (jblocks->jb_oldestseg == jseg) 4480 jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next); 4481 TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next); 4482 jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size); 4483 KASSERT(LIST_EMPTY(&jseg->js_entries), 4484 ("free_jseg: Freed jseg has valid entries.")); 4485 WORKITEM_FREE(jseg, D_JSEG); 4486} 4487 4488/* 4489 * Free all jsegs that meet the criteria for being reclaimed and update 4490 * oldestseg. 4491 */ 4492static void 4493free_jsegs(jblocks) 4494 struct jblocks *jblocks; 4495{ 4496 struct jseg *jseg; 4497 4498 /* 4499 * Free only those jsegs which have none allocated before them to 4500 * preserve the journal space ordering. 4501 */ 4502 while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) { 4503 /* 4504 * Only reclaim space when nothing depends on this journal 4505 * set and another set has written that it is no longer 4506 * valid.
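 * The checks below implement this: a jseg that still has js_refs is
 * the new oldest segment, one that is not yet ALLCOMPLETE is still in
 * flight, and one whose js_seq is beyond jb_oldestwrseq has not yet
 * been invalidated by a later journal write.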
4507 */ 4508 if (jseg->js_refs != 0) { 4509 jblocks->jb_oldestseg = jseg; 4510 return; 4511 } 4512 if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE) 4513 break; 4514 if (jseg->js_seq > jblocks->jb_oldestwrseq) 4515 break; 4516 /* 4517 * We can free jsegs that didn't write entries when 4518 * oldestwrseq == js_seq. 4519 */ 4520 if (jseg->js_seq == jblocks->jb_oldestwrseq && 4521 jseg->js_cnt != 0) 4522 break; 4523 free_jseg(jseg, jblocks); 4524 } 4525 /* 4526 * If we exited the loop above we still must discover the 4527 * oldest valid segment. 4528 */ 4529 if (jseg) 4530 for (jseg = jblocks->jb_oldestseg; jseg != NULL; 4531 jseg = TAILQ_NEXT(jseg, js_next)) 4532 if (jseg->js_refs != 0) 4533 break; 4534 jblocks->jb_oldestseg = jseg; 4535 /* 4536 * The journal has no valid records but some jsegs may still be 4537 * waiting on oldestwrseq to advance. We force a small record 4538 * out to permit these lingering records to be reclaimed. 4539 */ 4540 if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs)) 4541 jblocks->jb_needseg = 1; 4542} 4543 4544/* 4545 * Release one reference to a jseg and free it if the count reaches 0. This 4546 * should eventually reclaim journal space as well. 4547 */ 4548static void 4549rele_jseg(jseg) 4550 struct jseg *jseg; 4551{ 4552 4553 KASSERT(jseg->js_refs > 0, 4554 ("rele_jseg: Invalid refcnt %d", jseg->js_refs)); 4555 if (--jseg->js_refs != 0) 4556 return; 4557 free_jsegs(jseg->js_jblocks); 4558} 4559 4560/* 4561 * Release a jsegdep and decrement the jseg count. 4562 */ 4563static void 4564free_jsegdep(jsegdep) 4565 struct jsegdep *jsegdep; 4566{ 4567 4568 if (jsegdep->jd_seg) 4569 rele_jseg(jsegdep->jd_seg); 4570 WORKITEM_FREE(jsegdep, D_JSEGDEP); 4571} 4572 4573/* 4574 * Wait for a journal item to make it to disk. Initiate journal processing 4575 * if required. 4576 */ 4577static int 4578jwait(wk, waitfor) 4579 struct worklist *wk; 4580 int waitfor; 4581{ 4582 4583 LOCK_OWNED(VFSTOUFS(wk->wk_mp)); 4584 /* 4585 * Blocking journal waits cause slow synchronous behavior. Record 4586 * stats on the frequency of these blocking operations. 4587 */ 4588 if (waitfor == MNT_WAIT) { 4589 stat_journal_wait++; 4590 switch (wk->wk_type) { 4591 case D_JREMREF: 4592 case D_JMVREF: 4593 stat_jwait_filepage++; 4594 break; 4595 case D_JTRUNC: 4596 case D_JFREEBLK: 4597 stat_jwait_freeblks++; 4598 break; 4599 case D_JNEWBLK: 4600 stat_jwait_newblk++; 4601 break; 4602 case D_JADDREF: 4603 stat_jwait_inode++; 4604 break; 4605 default: 4606 break; 4607 } 4608 } 4609 /* 4610 * If IO has not started we process the journal. We can't mark the 4611 * worklist item as IOWAITING because we drop the lock while 4612 * processing the journal and the worklist entry may be freed after 4613 * this point. The caller may call back in and re-issue the request. 4614 */ 4615 if ((wk->wk_state & INPROGRESS) == 0) { 4616 softdep_process_journal(wk->wk_mp, wk, waitfor); 4617 if (waitfor != MNT_WAIT) 4618 return (EBUSY); 4619 return (0); 4620 } 4621 if (waitfor != MNT_WAIT) 4622 return (EBUSY); 4623 wait_worklist(wk, "jwait"); 4624 return (0); 4625} 4626 4627/* 4628 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as 4629 * appropriate. This is a convenience function to reduce duplicate code 4630 * for the setup and revert functions below.
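 *
 * As an illustration with made-up numbers: after unlinking one name of
 * a file that had two links, ip->i_effnlink is 1 while ip->i_nlink
 * remains 2 until the directory write completes, so id_nlinkdelta is
 * set to 1 below and the on-disk inode keeps the rolled-back count.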
4631 */ 4632static struct inodedep * 4633inodedep_lookup_ip(ip) 4634 struct inode *ip; 4635{ 4636 struct inodedep *inodedep; 4637 4638 KASSERT(ip->i_nlink >= ip->i_effnlink, 4639 ("inodedep_lookup_ip: bad delta")); 4640 (void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC, 4641 &inodedep); 4642 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 4643 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 4644 4645 return (inodedep); 4646} 4647 4648/* 4649 * Called prior to creating a new inode and linking it to a directory. The 4650 * jaddref structure must already be allocated by softdep_setup_inomapdep 4651 * and it is discovered here so we can initialize the mode and update 4652 * nlinkdelta. 4653 */ 4654void 4655softdep_setup_create(dp, ip) 4656 struct inode *dp; 4657 struct inode *ip; 4658{ 4659 struct inodedep *inodedep; 4660 struct jaddref *jaddref; 4661 struct vnode *dvp; 4662 4663 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4664 ("softdep_setup_create called on non-softdep filesystem")); 4665 KASSERT(ip->i_nlink == 1, 4666 ("softdep_setup_create: Invalid link count.")); 4667 dvp = ITOV(dp); 4668 ACQUIRE_LOCK(dp->i_ump); 4669 inodedep = inodedep_lookup_ip(ip); 4670 if (DOINGSUJ(dvp)) { 4671 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4672 inoreflst); 4673 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 4674 ("softdep_setup_create: No addref structure present.")); 4675 } 4676 softdep_prelink(dvp, NULL); 4677 FREE_LOCK(dp->i_ump); 4678} 4679 4680/* 4681 * Create a jaddref structure to track the addition of a DOTDOT link when 4682 * we are reparenting an inode as part of a rename. This jaddref will be 4683 * found by softdep_setup_directory_change. Adjusts nlinkdelta for 4684 * non-journaling softdep. 4685 */ 4686void 4687softdep_setup_dotdot_link(dp, ip) 4688 struct inode *dp; 4689 struct inode *ip; 4690{ 4691 struct inodedep *inodedep; 4692 struct jaddref *jaddref; 4693 struct vnode *dvp; 4694 4695 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4696 ("softdep_setup_dotdot_link called on non-softdep filesystem")); 4697 dvp = ITOV(dp); 4698 jaddref = NULL; 4699 /* 4700 * We don't set MKDIR_PARENT as this is not tied to a mkdir and 4701 * is used as a normal link would be. 4702 */ 4703 if (DOINGSUJ(dvp)) 4704 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4705 dp->i_effnlink - 1, dp->i_mode); 4706 ACQUIRE_LOCK(dp->i_ump); 4707 inodedep = inodedep_lookup_ip(dp); 4708 if (jaddref) 4709 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4710 if_deps); 4711 softdep_prelink(dvp, ITOV(ip)); 4712 FREE_LOCK(dp->i_ump); 4713} 4714 4715/* 4716 * Create a jaddref structure to track a new link to an inode. The directory 4717 * offset is not known until softdep_setup_directory_add or 4718 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling 4719 * softdep. 
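 *
 * The expected calling sequence, roughly (a sketch only; the real
 * caller is the VFS link code in ufs_vnops.c):
 *
 *	ip->i_effnlink++;
 *	ip->i_nlink++;
 *	softdep_setup_link(dp, ip);
 *	... the new directory entry is then recorded with
 *	softdep_setup_directory_add() ...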
4720 */ 4721void 4722softdep_setup_link(dp, ip) 4723 struct inode *dp; 4724 struct inode *ip; 4725{ 4726 struct inodedep *inodedep; 4727 struct jaddref *jaddref; 4728 struct vnode *dvp; 4729 4730 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4731 ("softdep_setup_link called on non-softdep filesystem")); 4732 dvp = ITOV(dp); 4733 jaddref = NULL; 4734 if (DOINGSUJ(dvp)) 4735 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, 4736 ip->i_mode); 4737 ACQUIRE_LOCK(dp->i_ump); 4738 inodedep = inodedep_lookup_ip(ip); 4739 if (jaddref) 4740 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4741 if_deps); 4742 softdep_prelink(dvp, ITOV(ip)); 4743 FREE_LOCK(dp->i_ump); 4744} 4745 4746/* 4747 * Called to create the jaddref structures to track . and .. references as 4748 * well as lookup and further initialize the incomplete jaddref created 4749 * by softdep_setup_inomapdep when the inode was allocated. Adjusts 4750 * nlinkdelta for non-journaling softdep. 4751 */ 4752void 4753softdep_setup_mkdir(dp, ip) 4754 struct inode *dp; 4755 struct inode *ip; 4756{ 4757 struct inodedep *inodedep; 4758 struct jaddref *dotdotaddref; 4759 struct jaddref *dotaddref; 4760 struct jaddref *jaddref; 4761 struct vnode *dvp; 4762 4763 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4764 ("softdep_setup_mkdir called on non-softdep filesystem")); 4765 dvp = ITOV(dp); 4766 dotaddref = dotdotaddref = NULL; 4767 if (DOINGSUJ(dvp)) { 4768 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, 4769 ip->i_mode); 4770 dotaddref->ja_state |= MKDIR_BODY; 4771 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4772 dp->i_effnlink - 1, dp->i_mode); 4773 dotdotaddref->ja_state |= MKDIR_PARENT; 4774 } 4775 ACQUIRE_LOCK(dp->i_ump); 4776 inodedep = inodedep_lookup_ip(ip); 4777 if (DOINGSUJ(dvp)) { 4778 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4779 inoreflst); 4780 KASSERT(jaddref != NULL, 4781 ("softdep_setup_mkdir: No addref structure present.")); 4782 KASSERT(jaddref->ja_parent == dp->i_number, 4783 ("softdep_setup_mkdir: bad parent %ju", 4784 (uintmax_t)jaddref->ja_parent)); 4785 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, 4786 if_deps); 4787 } 4788 inodedep = inodedep_lookup_ip(dp); 4789 if (DOINGSUJ(dvp)) 4790 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, 4791 &dotdotaddref->ja_ref, if_deps); 4792 softdep_prelink(ITOV(dp), NULL); 4793 FREE_LOCK(dp->i_ump); 4794} 4795 4796/* 4797 * Called to track nlinkdelta of the inode and parent directories prior to 4798 * unlinking a directory. 4799 */ 4800void 4801softdep_setup_rmdir(dp, ip) 4802 struct inode *dp; 4803 struct inode *ip; 4804{ 4805 struct vnode *dvp; 4806 4807 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4808 ("softdep_setup_rmdir called on non-softdep filesystem")); 4809 dvp = ITOV(dp); 4810 ACQUIRE_LOCK(dp->i_ump); 4811 (void) inodedep_lookup_ip(ip); 4812 (void) inodedep_lookup_ip(dp); 4813 softdep_prelink(dvp, ITOV(ip)); 4814 FREE_LOCK(dp->i_ump); 4815} 4816 4817/* 4818 * Called to track nlinkdelta of the inode and parent directories prior to 4819 * unlink. 
4820 */ 4821void 4822softdep_setup_unlink(dp, ip) 4823 struct inode *dp; 4824 struct inode *ip; 4825{ 4826 struct vnode *dvp; 4827 4828 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4829 ("softdep_setup_unlink called on non-softdep filesystem")); 4830 dvp = ITOV(dp); 4831 ACQUIRE_LOCK(dp->i_ump); 4832 (void) inodedep_lookup_ip(ip); 4833 (void) inodedep_lookup_ip(dp); 4834 softdep_prelink(dvp, ITOV(ip)); 4835 FREE_LOCK(dp->i_ump); 4836} 4837 4838/* 4839 * Called to release the journal structures created by a failed non-directory 4840 * creation. Adjusts nlinkdelta for non-journaling softdep. 4841 */ 4842void 4843softdep_revert_create(dp, ip) 4844 struct inode *dp; 4845 struct inode *ip; 4846{ 4847 struct inodedep *inodedep; 4848 struct jaddref *jaddref; 4849 struct vnode *dvp; 4850 4851 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4852 ("softdep_revert_create called on non-softdep filesystem")); 4853 dvp = ITOV(dp); 4854 ACQUIRE_LOCK(dp->i_ump); 4855 inodedep = inodedep_lookup_ip(ip); 4856 if (DOINGSUJ(dvp)) { 4857 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4858 inoreflst); 4859 KASSERT(jaddref->ja_parent == dp->i_number, 4860 ("softdep_revert_create: addref parent mismatch")); 4861 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4862 } 4863 FREE_LOCK(dp->i_ump); 4864} 4865 4866/* 4867 * Called to release the journal structures created by a failed link 4868 * addition. Adjusts nlinkdelta for non-journaling softdep. 4869 */ 4870void 4871softdep_revert_link(dp, ip) 4872 struct inode *dp; 4873 struct inode *ip; 4874{ 4875 struct inodedep *inodedep; 4876 struct jaddref *jaddref; 4877 struct vnode *dvp; 4878 4879 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4880 ("softdep_revert_link called on non-softdep filesystem")); 4881 dvp = ITOV(dp); 4882 ACQUIRE_LOCK(dp->i_ump); 4883 inodedep = inodedep_lookup_ip(ip); 4884 if (DOINGSUJ(dvp)) { 4885 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4886 inoreflst); 4887 KASSERT(jaddref->ja_parent == dp->i_number, 4888 ("softdep_revert_link: addref parent mismatch")); 4889 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4890 } 4891 FREE_LOCK(dp->i_ump); 4892} 4893 4894/* 4895 * Called to release the journal structures created by a failed mkdir 4896 * attempt. Adjusts nlinkdelta for non-journaling softdep. 
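 *
 * A failed mkdir leaves three jaddrefs outstanding, and all three are
 * canceled below: the ".." ref queued on the parent's inodedep, then
 * the name ref and the "." ref queued on the new directory's inodedep.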
4897 */ 4898void 4899softdep_revert_mkdir(dp, ip) 4900 struct inode *dp; 4901 struct inode *ip; 4902{ 4903 struct inodedep *inodedep; 4904 struct jaddref *jaddref; 4905 struct jaddref *dotaddref; 4906 struct vnode *dvp; 4907 4908 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4909 ("softdep_revert_mkdir called on non-softdep filesystem")); 4910 dvp = ITOV(dp); 4911 4912 ACQUIRE_LOCK(dp->i_ump); 4913 inodedep = inodedep_lookup_ip(dp); 4914 if (DOINGSUJ(dvp)) { 4915 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4916 inoreflst); 4917 KASSERT(jaddref->ja_parent == ip->i_number, 4918 ("softdep_revert_mkdir: dotdot addref parent mismatch")); 4919 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4920 } 4921 inodedep = inodedep_lookup_ip(ip); 4922 if (DOINGSUJ(dvp)) { 4923 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4924 inoreflst); 4925 KASSERT(jaddref->ja_parent == dp->i_number, 4926 ("softdep_revert_mkdir: addref parent mismatch")); 4927 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 4928 inoreflst, if_deps); 4929 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4930 KASSERT(dotaddref->ja_parent == ip->i_number, 4931 ("softdep_revert_mkdir: dot addref parent mismatch")); 4932 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); 4933 } 4934 FREE_LOCK(dp->i_ump); 4935} 4936 4937/* 4938 * Called to correct nlinkdelta after a failed rmdir. 4939 */ 4940void 4941softdep_revert_rmdir(dp, ip) 4942 struct inode *dp; 4943 struct inode *ip; 4944{ 4945 4946 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4947 ("softdep_revert_rmdir called on non-softdep filesystem")); 4948 ACQUIRE_LOCK(dp->i_ump); 4949 (void) inodedep_lookup_ip(ip); 4950 (void) inodedep_lookup_ip(dp); 4951 FREE_LOCK(dp->i_ump); 4952} 4953 4954/* 4955 * Protecting the freemaps (or bitmaps). 4956 * 4957 * To eliminate the need to execute fsck before mounting a filesystem 4958 * after a power failure, one must (conservatively) guarantee that the 4959 * on-disk copy of the bitmaps never indicates that a live inode or block is 4960 * free. So, when a block or inode is allocated, the bitmap should be 4961 * updated (on disk) before any new pointers. When a block or inode is 4962 * freed, the bitmap should not be updated until all pointers have been 4963 * reset. The latter dependency is handled by the delayed de-allocation 4964 * approach described below for block and inode de-allocation. The former 4965 * dependency is handled by calling the following procedure when a block or 4966 * inode is allocated. When an inode is allocated an "inodedep" is created 4967 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 4968 * Each "inodedep" is also inserted into the hash indexing structure so 4969 * that any additional link additions can be made dependent on the inode 4970 * allocation. 4971 * 4972 * The ufs filesystem maintains a number of free block counts (e.g., per 4973 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 4974 * in addition to the bitmaps. These counts are used to improve efficiency 4975 * during allocation and therefore must be consistent with the bitmaps. 4976 * There is no convenient way to guarantee post-crash consistency of these 4977 * counts with simple update ordering, for two main reasons: (1) The counts 4978 * and bitmaps for a single cylinder group block are not in the same disk 4979 * sector. If a disk write is interrupted (e.g., by power failure), one may 4980 * be written and the other not.
(2) Some of the counts are located in the 4981 * superblock rather than the cylinder group block. So, we focus our soft 4982 * updates implementation on protecting the bitmaps. When mounting a 4983 * filesystem, we recompute the auxiliary counts from the bitmaps. 4984 */ 4985 4986/* 4987 * Called just after updating the cylinder group block to allocate an inode. 4988 */ 4989void 4990softdep_setup_inomapdep(bp, ip, newinum, mode) 4991 struct buf *bp; /* buffer for cylgroup block with inode map */ 4992 struct inode *ip; /* inode related to allocation */ 4993 ino_t newinum; /* new inode number being allocated */ 4994 int mode; 4995{ 4996 struct inodedep *inodedep; 4997 struct bmsafemap *bmsafemap; 4998 struct jaddref *jaddref; 4999 struct mount *mp; 5000 struct fs *fs; 5001 5002 mp = UFSTOVFS(ip->i_ump); 5003 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5004 ("softdep_setup_inomapdep called on non-softdep filesystem")); 5005 fs = ip->i_ump->um_fs; 5006 jaddref = NULL; 5007 5008 /* 5009 * Allocate the journal reference add structure so that the bitmap 5010 * can be dependent on it. 5011 */ 5012 if (MOUNTEDSUJ(mp)) { 5013 jaddref = newjaddref(ip, newinum, 0, 0, mode); 5014 jaddref->ja_state |= NEWBLOCK; 5015 } 5016 5017 /* 5018 * Create a dependency for the newly allocated inode. 5019 * Panic if it already exists as something is seriously wrong. 5020 * Otherwise add it to the dependency list for the buffer holding 5021 * the cylinder group map from which it was allocated. 5022 * 5023 * We have to preallocate a bmsafemap entry in case it is needed 5024 * in bmsafemap_lookup since once we allocate the inodedep, we 5025 * have to finish initializing it before we can FREE_LOCK(). 5026 * By preallocating, we avoid FREE_LOCK() while doing a malloc 5027 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before 5028 * creating the inodedep as it can be freed during the time 5029 * that we FREE_LOCK() while allocating the inodedep. We must 5030 * call workitem_alloc() before entering the locked section as 5031 * it also acquires the lock and we must avoid trying to do so 5032 * recursively. 5033 */ 5034 bmsafemap = malloc(sizeof(struct bmsafemap), 5035 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 5036 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 5037 ACQUIRE_LOCK(ip->i_ump); 5038 if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep))) 5039 panic("softdep_setup_inomapdep: dependency %p for new" 5040 " inode already exists", inodedep); 5041 bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap); 5042 if (jaddref) { 5043 LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps); 5044 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 5045 if_deps); 5046 } else { 5047 inodedep->id_state |= ONDEPLIST; 5048 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps); 5049 } 5050 inodedep->id_bmsafemap = bmsafemap; 5051 inodedep->id_state &= ~DEPCOMPLETE; 5052 FREE_LOCK(ip->i_ump); 5053} 5054 5055/* 5056 * Called just after updating the cylinder group block to 5057 * allocate block or fragment. 5058 */ 5059void 5060softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags) 5061 struct buf *bp; /* buffer for cylgroup block with block map */ 5062 struct mount *mp; /* filesystem doing allocation */ 5063 ufs2_daddr_t newblkno; /* number of newly allocated block */ 5064 int frags; /* Number of fragments. */ 5065 int oldfrags; /* Previous number of fragments for extend.
*/ 5066{ 5067 struct newblk *newblk; 5068 struct bmsafemap *bmsafemap; 5069 struct jnewblk *jnewblk; 5070 struct ufsmount *ump; 5071 struct fs *fs; 5072 5073 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5074 ("softdep_setup_blkmapdep called on non-softdep filesystem")); 5075 ump = VFSTOUFS(mp); 5076 fs = ump->um_fs; 5077 jnewblk = NULL; 5078 /* 5079 * Create a dependency for the newly allocated block. 5080 * Add it to the dependency list for the buffer holding 5081 * the cylinder group map from which it was allocated. 5082 */ 5083 if (MOUNTEDSUJ(mp)) { 5084 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); 5085 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); 5086 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); 5087 jnewblk->jn_state = ATTACHED; 5088 jnewblk->jn_blkno = newblkno; 5089 jnewblk->jn_frags = frags; 5090 jnewblk->jn_oldfrags = oldfrags; 5091#ifdef SUJ_DEBUG 5092 { 5093 struct cg *cgp; 5094 uint8_t *blksfree; 5095 long bno; 5096 int i; 5097 5098 cgp = (struct cg *)bp->b_data; 5099 blksfree = cg_blksfree(cgp); 5100 bno = dtogd(fs, jnewblk->jn_blkno); 5101 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; 5102 i++) { 5103 if (isset(blksfree, bno + i)) 5104 panic("softdep_setup_blkmapdep: " 5105 "free fragment %d from %d-%d " 5106 "state 0x%X dep %p", i, 5107 jnewblk->jn_oldfrags, 5108 jnewblk->jn_frags, 5109 jnewblk->jn_state, 5110 jnewblk->jn_dep); 5111 } 5112 } 5113#endif 5114 } 5115 5116 CTR3(KTR_SUJ, 5117 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", 5118 newblkno, frags, oldfrags); 5119 ACQUIRE_LOCK(ump); 5120 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) 5121 panic("softdep_setup_blkmapdep: found block"); 5122 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp, 5123 dtog(fs, newblkno), NULL); 5124 if (jnewblk) { 5125 jnewblk->jn_dep = (struct worklist *)newblk; 5126 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); 5127 } else { 5128 newblk->nb_state |= ONDEPLIST; 5129 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 5130 } 5131 newblk->nb_bmsafemap = bmsafemap; 5132 newblk->nb_jnewblk = jnewblk; 5133 FREE_LOCK(ump); 5134} 5135 5136#define BMSAFEMAP_HASH(ump, cg) \ 5137 (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size]) 5138 5139static int 5140bmsafemap_find(bmsafemaphd, cg, bmsafemapp) 5141 struct bmsafemap_hashhead *bmsafemaphd; 5142 int cg; 5143 struct bmsafemap **bmsafemapp; 5144{ 5145 struct bmsafemap *bmsafemap; 5146 5147 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) 5148 if (bmsafemap->sm_cg == cg) 5149 break; 5150 if (bmsafemap) { 5151 *bmsafemapp = bmsafemap; 5152 return (1); 5153 } 5154 *bmsafemapp = NULL; 5155 5156 return (0); 5157} 5158 5159/* 5160 * Find the bmsafemap associated with a cylinder group buffer. 5161 * If none exists, create one. The buffer must be locked when 5162 * this routine is called and this routine must be called with 5163 * the softdep lock held. To avoid giving up the lock while 5164 * allocating a new bmsafemap, a preallocated bmsafemap may be 5165 * provided. If it is provided but not needed, it is freed. 
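 *
 * Note that bmsafemap_find() runs a second time after a fresh
 * allocation: malloc may sleep with the lock dropped, so another
 * thread can install a bmsafemap for the same cg in the interim, in
 * which case the new one is freed and the existing one returned.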
5166 */ 5167static struct bmsafemap * 5168bmsafemap_lookup(mp, bp, cg, newbmsafemap) 5169 struct mount *mp; 5170 struct buf *bp; 5171 int cg; 5172 struct bmsafemap *newbmsafemap; 5173{ 5174 struct bmsafemap_hashhead *bmsafemaphd; 5175 struct bmsafemap *bmsafemap, *collision; 5176 struct worklist *wk; 5177 struct ufsmount *ump; 5178 5179 ump = VFSTOUFS(mp); 5180 LOCK_OWNED(ump); 5181 KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer")); 5182 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5183 if (wk->wk_type == D_BMSAFEMAP) { 5184 if (newbmsafemap) 5185 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5186 return (WK_BMSAFEMAP(wk)); 5187 } 5188 } 5189 bmsafemaphd = BMSAFEMAP_HASH(ump, cg); 5190 if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) { 5191 if (newbmsafemap) 5192 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5193 return (bmsafemap); 5194 } 5195 if (newbmsafemap) { 5196 bmsafemap = newbmsafemap; 5197 } else { 5198 FREE_LOCK(ump); 5199 bmsafemap = malloc(sizeof(struct bmsafemap), 5200 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 5201 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 5202 ACQUIRE_LOCK(ump); 5203 } 5204 bmsafemap->sm_buf = bp; 5205 LIST_INIT(&bmsafemap->sm_inodedephd); 5206 LIST_INIT(&bmsafemap->sm_inodedepwr); 5207 LIST_INIT(&bmsafemap->sm_newblkhd); 5208 LIST_INIT(&bmsafemap->sm_newblkwr); 5209 LIST_INIT(&bmsafemap->sm_jaddrefhd); 5210 LIST_INIT(&bmsafemap->sm_jnewblkhd); 5211 LIST_INIT(&bmsafemap->sm_freehd); 5212 LIST_INIT(&bmsafemap->sm_freewr); 5213 if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) { 5214 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 5215 return (collision); 5216 } 5217 bmsafemap->sm_cg = cg; 5218 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); 5219 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 5220 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 5221 return (bmsafemap); 5222} 5223 5224/* 5225 * Direct block allocation dependencies. 5226 * 5227 * When a new block is allocated, the corresponding disk locations must be 5228 * initialized (with zeros or new data) before the on-disk inode points to 5229 * them. Also, the freemap from which the block was allocated must be 5230 * updated (on disk) before the inode's pointer. These two dependencies are 5231 * independent of each other and are needed for all file blocks and indirect 5232 * blocks that are pointed to directly by the inode. Just before the 5233 * "in-core" version of the inode is updated with a newly allocated block 5234 * number, a procedure (below) is called to setup allocation dependency 5235 * structures. These structures are removed when the corresponding 5236 * dependencies are satisfied or when the block allocation becomes obsolete 5237 * (i.e., the file is deleted, the block is de-allocated, or the block is a 5238 * fragment that gets upgraded). All of these cases are handled in 5239 * procedures described later. 5240 * 5241 * When a file extension causes a fragment to be upgraded, either to a larger 5242 * fragment or to a full block, the on-disk location may change (if the 5243 * previous fragment could not simply be extended). In this case, the old 5244 * fragment must be de-allocated, but not until after the inode's pointer has 5245 * been updated. In most cases, this is handled by later procedures, which 5246 * will construct a "freefrag" structure to be added to the workitem queue 5247 * when the inode update is complete (or obsolete). 
The main exception to 5248 * this is when an allocation occurs while a pending allocation dependency 5249 * (for the same block pointer) remains. This case is handled in the main 5250 * allocation dependency setup procedure by immediately freeing the 5251 * unreferenced fragments. 5252 */ 5253void 5254softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp) 5255 struct inode *ip; /* inode to which block is being added */ 5256 ufs_lbn_t off; /* block pointer within inode */ 5257 ufs2_daddr_t newblkno; /* disk block number being added */ 5258 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */ 5259 long newsize; /* size of new block */ 5260 long oldsize; /* size of old block */ 5261 struct buf *bp; /* bp for allocated block */ 5262{ 5263 struct allocdirect *adp, *oldadp; 5264 struct allocdirectlst *adphead; 5265 struct freefrag *freefrag; 5266 struct inodedep *inodedep; 5267 struct pagedep *pagedep; 5268 struct jnewblk *jnewblk; 5269 struct newblk *newblk; 5270 struct mount *mp; 5271 ufs_lbn_t lbn; 5272 5273 lbn = bp->b_lblkno; 5274 mp = UFSTOVFS(ip->i_ump); 5275 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5276 ("softdep_setup_allocdirect called on non-softdep filesystem")); 5277 if (oldblkno && oldblkno != newblkno) 5278 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); 5279 else 5280 freefrag = NULL; 5281 5282 CTR6(KTR_SUJ, 5283 "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd " 5284 "off %jd newsize %ld oldsize %ld", 5285 ip->i_number, newblkno, oldblkno, off, newsize, oldsize); 5286 ACQUIRE_LOCK(ip->i_ump); 5287 if (off >= NDADDR) { 5288 if (lbn > 0) 5289 panic("softdep_setup_allocdirect: bad lbn %jd, off %jd", 5290 lbn, off); 5291 /* allocating an indirect block */ 5292 if (oldblkno != 0) 5293 panic("softdep_setup_allocdirect: non-zero indir"); 5294 } else { 5295 if (off != lbn) 5296 panic("softdep_setup_allocdirect: lbn %jd != off %jd", 5297 lbn, off); 5298 /* 5299 * Allocating a direct block. 5300 * 5301 * If we are allocating a directory block, then we must 5302 * allocate an associated pagedep to track additions and 5303 * deletions. 5304 */ 5305 if ((ip->i_mode & IFMT) == IFDIR) 5306 pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC, 5307 &pagedep); 5308 } 5309 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) 5310 panic("softdep_setup_allocdirect: lost block"); 5311 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5312 ("softdep_setup_allocdirect: newblk already initialized")); 5313 /* 5314 * Convert the newblk to an allocdirect. 5315 */ 5316 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); 5317 adp = (struct allocdirect *)newblk; 5318 newblk->nb_freefrag = freefrag; 5319 adp->ad_offset = off; 5320 adp->ad_oldblkno = oldblkno; 5321 adp->ad_newsize = newsize; 5322 adp->ad_oldsize = oldsize; 5323 5324 /* 5325 * Finish initializing the journal.
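 * The jnewblk was allocated by softdep_setup_blkmapdep() when the
 * block was taken from the bitmap; only now are the inode number and
 * logical block known, so the record can be completed and queued.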
5326 */ 5327 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5328 jnewblk->jn_ino = ip->i_number; 5329 jnewblk->jn_lbn = lbn; 5330 add_to_journal(&jnewblk->jn_list); 5331 } 5332 if (freefrag && freefrag->ff_jdep != NULL && 5333 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5334 add_to_journal(freefrag->ff_jdep); 5335 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 5336 adp->ad_inodedep = inodedep; 5337 5338 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5339 /* 5340 * The list of allocdirects must be kept in sorted and ascending 5341 * order so that the rollback routines can quickly determine the 5342 * first uncommitted block (the size of the file stored on disk 5343 * ends at the end of the lowest committed fragment, or if there 5344 * are no fragments, at the end of the highest committed block). 5345 * Since files generally grow, the typical case is that the new 5346 * block is to be added at the end of the list. We speed this 5347 * special case by checking against the last allocdirect in the 5348 * list before laboriously traversing the list looking for the 5349 * insertion point. 5350 */ 5351 adphead = &inodedep->id_newinoupdt; 5352 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5353 if (oldadp == NULL || oldadp->ad_offset <= off) { 5354 /* insert at end of list */ 5355 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5356 if (oldadp != NULL && oldadp->ad_offset == off) 5357 allocdirect_merge(adphead, adp, oldadp); 5358 FREE_LOCK(ip->i_ump); 5359 return; 5360 } 5361 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5362 if (oldadp->ad_offset >= off) 5363 break; 5364 } 5365 if (oldadp == NULL) 5366 panic("softdep_setup_allocdirect: lost entry"); 5367 /* insert in middle of list */ 5368 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5369 if (oldadp->ad_offset == off) 5370 allocdirect_merge(adphead, adp, oldadp); 5371 5372 FREE_LOCK(ip->i_ump); 5373} 5374 5375/* 5376 * Merge a newer and older journal record to be stored either in a 5377 * newblock or freefrag. This handles aggregating journal records for 5378 * fragment allocation into a second record as well as replacing a 5379 * journal free with an aborted journal allocation. A segment for the 5380 * oldest record will be placed on wkhd if it has been written. If not 5381 * the segment for the newer record will suffice. 5382 */ 5383static struct worklist * 5384jnewblk_merge(new, old, wkhd) 5385 struct worklist *new; 5386 struct worklist *old; 5387 struct workhead *wkhd; 5388{ 5389 struct jnewblk *njnewblk; 5390 struct jnewblk *jnewblk; 5391 5392 /* Handle NULLs to simplify callers. */ 5393 if (new == NULL) 5394 return (old); 5395 if (old == NULL) 5396 return (new); 5397 /* Replace a jfreefrag with a jnewblk. */ 5398 if (new->wk_type == D_JFREEFRAG) { 5399 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) 5400 panic("jnewblk_merge: blkno mismatch: %p, %p", 5401 old, new); 5402 cancel_jfreefrag(WK_JFREEFRAG(new)); 5403 return (old); 5404 } 5405 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) 5406 panic("jnewblk_merge: Bad type: old %d new %d\n", 5407 old->wk_type, new->wk_type); 5408 /* 5409 * Handle merging of two jnewblk records that describe 5410 * different sets of fragments in the same block. 5411 */ 5412 jnewblk = WK_JNEWBLK(old); 5413 njnewblk = WK_JNEWBLK(new); 5414 if (jnewblk->jn_blkno != njnewblk->jn_blkno) 5415 panic("jnewblk_merge: Merging disparate blocks."); 5416 /* 5417 * The record may be rolled back in the cg. 
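 * That is, the cg buffer may have been written while this allocation
 * was still incomplete, with the bitmap bits temporarily restored to
 * their old state; the UNDONE marker is handed to the surviving record
 * so the bits are corrected exactly once when the cg write completes.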
5418 */ 5419 if (jnewblk->jn_state & UNDONE) { 5420 jnewblk->jn_state &= ~UNDONE; 5421 njnewblk->jn_state |= UNDONE; 5422 njnewblk->jn_state &= ~ATTACHED; 5423 } 5424 /* 5425 * We modify the newer addref and free the older so that if neither 5426 * has been written the most up-to-date copy will be on disk. If 5427 * both have been written but rolled back we only temporarily need 5428 * one of them to fix the bits when the cg write completes. 5429 */ 5430 jnewblk->jn_state |= ATTACHED | COMPLETE; 5431 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; 5432 cancel_jnewblk(jnewblk, wkhd); 5433 WORKLIST_REMOVE(&jnewblk->jn_list); 5434 free_jnewblk(jnewblk); 5435 return (new); 5436} 5437 5438/* 5439 * Replace an old allocdirect dependency with a newer one. 5440 * This routine must be called with splbio interrupts blocked. 5441 */ 5442static void 5443allocdirect_merge(adphead, newadp, oldadp) 5444 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 5445 struct allocdirect *newadp; /* allocdirect being added */ 5446 struct allocdirect *oldadp; /* existing allocdirect being checked */ 5447{ 5448 struct worklist *wk; 5449 struct freefrag *freefrag; 5450 5451 freefrag = NULL; 5452 LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp)); 5453 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 5454 newadp->ad_oldsize != oldadp->ad_newsize || 5455 newadp->ad_offset >= NDADDR) 5456 panic("%s %jd != new %jd || old size %ld != new %ld", 5457 "allocdirect_merge: old blkno", 5458 (intmax_t)newadp->ad_oldblkno, 5459 (intmax_t)oldadp->ad_newblkno, 5460 newadp->ad_oldsize, oldadp->ad_newsize); 5461 newadp->ad_oldblkno = oldadp->ad_oldblkno; 5462 newadp->ad_oldsize = oldadp->ad_oldsize; 5463 /* 5464 * If the old dependency had a fragment to free or had never 5465 * previously had a block allocated, then the new dependency 5466 * can immediately post its freefrag and adopt the old freefrag. 5467 * This action is done by swapping the freefrag dependencies. 5468 * The new dependency gains the old one's freefrag, and the 5469 * old one gets the new one and then immediately puts it on 5470 * the worklist when it is freed by free_newblk. It is 5471 * not possible to do this swap when the old dependency had a 5472 * non-zero size but no previous fragment to free. This condition 5473 * arises when the new block is an extension of the old block. 5474 * Here, the first part of the fragment allocated to the new 5475 * dependency is part of the block currently claimed on disk by 5476 * the old dependency, so cannot legitimately be freed until the 5477 * conditions for the new dependency are fulfilled. 5478 */ 5479 freefrag = newadp->ad_freefrag; 5480 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 5481 newadp->ad_freefrag = oldadp->ad_freefrag; 5482 oldadp->ad_freefrag = freefrag; 5483 } 5484 /* 5485 * If we are tracking a new directory-block allocation, 5486 * move it from the old allocdirect to the new allocdirect. 5487 */ 5488 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 5489 WORKLIST_REMOVE(wk); 5490 if (!LIST_EMPTY(&oldadp->ad_newdirblk)) 5491 panic("allocdirect_merge: extra newdirblk"); 5492 WORKLIST_INSERT(&newadp->ad_newdirblk, wk); 5493 } 5494 TAILQ_REMOVE(adphead, oldadp, ad_next); 5495 /* 5496 * We need to move any journal dependencies over to the freefrag 5497 * that releases this block if it exists. 
Otherwise we are 5498 * extending an existing block and we'll wait until that is 5499 * complete to release the journal space and extend the 5500 * new journal to cover this old space as well. 5501 */ 5502 if (freefrag == NULL) { 5503 if (oldadp->ad_newblkno != newadp->ad_newblkno) 5504 panic("allocdirect_merge: %jd != %jd", 5505 oldadp->ad_newblkno, newadp->ad_newblkno); 5506 newadp->ad_block.nb_jnewblk = (struct jnewblk *) 5507 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, 5508 &oldadp->ad_block.nb_jnewblk->jn_list, 5509 &newadp->ad_block.nb_jwork); 5510 oldadp->ad_block.nb_jnewblk = NULL; 5511 cancel_newblk(&oldadp->ad_block, NULL, 5512 &newadp->ad_block.nb_jwork); 5513 } else { 5514 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, 5515 &freefrag->ff_list, &freefrag->ff_jwork); 5516 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, 5517 &freefrag->ff_jwork); 5518 } 5519 free_newblk(&oldadp->ad_block); 5520} 5521 5522/* 5523 * Allocate a jfreefrag structure to journal a single block free. 5524 */ 5525static struct jfreefrag * 5526newjfreefrag(freefrag, ip, blkno, size, lbn) 5527 struct freefrag *freefrag; 5528 struct inode *ip; 5529 ufs2_daddr_t blkno; 5530 long size; 5531 ufs_lbn_t lbn; 5532{ 5533 struct jfreefrag *jfreefrag; 5534 struct fs *fs; 5535 5536 fs = ip->i_fs; 5537 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, 5538 M_SOFTDEP_FLAGS); 5539 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump)); 5540 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); 5541 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; 5542 jfreefrag->fr_ino = ip->i_number; 5543 jfreefrag->fr_lbn = lbn; 5544 jfreefrag->fr_blkno = blkno; 5545 jfreefrag->fr_frags = numfrags(fs, size); 5546 jfreefrag->fr_freefrag = freefrag; 5547 5548 return (jfreefrag); 5549} 5550 5551/* 5552 * Allocate a new freefrag structure. 5553 */ 5554static struct freefrag * 5555newfreefrag(ip, blkno, size, lbn) 5556 struct inode *ip; 5557 ufs2_daddr_t blkno; 5558 long size; 5559 ufs_lbn_t lbn; 5560{ 5561 struct freefrag *freefrag; 5562 struct fs *fs; 5563 5564 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", 5565 ip->i_number, blkno, size, lbn); 5566 fs = ip->i_fs; 5567 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 5568 panic("newfreefrag: frag size"); 5569 freefrag = malloc(sizeof(struct freefrag), 5570 M_FREEFRAG, M_SOFTDEP_FLAGS); 5571 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump)); 5572 freefrag->ff_state = ATTACHED; 5573 LIST_INIT(&freefrag->ff_jwork); 5574 freefrag->ff_inum = ip->i_number; 5575 freefrag->ff_vtype = ITOV(ip)->v_type; 5576 freefrag->ff_blkno = blkno; 5577 freefrag->ff_fragsize = size; 5578 5579 if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) { 5580 freefrag->ff_jdep = (struct worklist *) 5581 newjfreefrag(freefrag, ip, blkno, size, lbn); 5582 } else { 5583 freefrag->ff_state |= DEPCOMPLETE; 5584 freefrag->ff_jdep = NULL; 5585 } 5586 5587 return (freefrag); 5588} 5589 5590/* 5591 * This workitem de-allocates fragments that were replaced during 5592 * file block allocation. 
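 *
 * Note the handoff of journal work below: items on ff_jwork are moved
 * to a local list under the lock and passed to ffs_blkfree(), so the
 * jsegdep is not released until the block is truly free.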
5593 */ 5594static void 5595handle_workitem_freefrag(freefrag) 5596 struct freefrag *freefrag; 5597{ 5598 struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp); 5599 struct workhead wkhd; 5600 5601 CTR3(KTR_SUJ, 5602 "handle_workitem_freefrag: ino %d blkno %jd size %ld", 5603 freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize); 5604 /* 5605 * It would be illegal to add new completion items to the 5606 * freefrag after it was scheduled to be done so it must be 5607 * safe to modify the list head here. 5608 */ 5609 LIST_INIT(&wkhd); 5610 ACQUIRE_LOCK(ump); 5611 LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list); 5612 /* 5613 * If the journal has not been written we must cancel it here. 5614 */ 5615 if (freefrag->ff_jdep) { 5616 if (freefrag->ff_jdep->wk_type != D_JNEWBLK) 5617 panic("handle_workitem_freefrag: Unexpected type %d\n", 5618 freefrag->ff_jdep->wk_type); 5619 cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd); 5620 } 5621 FREE_LOCK(ump); 5622 ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno, 5623 freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd); 5624 ACQUIRE_LOCK(ump); 5625 WORKITEM_FREE(freefrag, D_FREEFRAG); 5626 FREE_LOCK(ump); 5627} 5628 5629/* 5630 * Set up a dependency structure for an external attributes data block. 5631 * This routine follows much of the structure of softdep_setup_allocdirect. 5632 * See the description of softdep_setup_allocdirect above for details. 5633 */ 5634void 5635softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp) 5636 struct inode *ip; 5637 ufs_lbn_t off; 5638 ufs2_daddr_t newblkno; 5639 ufs2_daddr_t oldblkno; 5640 long newsize; 5641 long oldsize; 5642 struct buf *bp; 5643{ 5644 struct allocdirect *adp, *oldadp; 5645 struct allocdirectlst *adphead; 5646 struct freefrag *freefrag; 5647 struct inodedep *inodedep; 5648 struct jnewblk *jnewblk; 5649 struct newblk *newblk; 5650 struct mount *mp; 5651 ufs_lbn_t lbn; 5652 5653 mp = UFSTOVFS(ip->i_ump); 5654 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5655 ("softdep_setup_allocext called on non-softdep filesystem")); 5656 KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld >= NXADDR", 5657 (long long)off)); 5658 5659 lbn = bp->b_lblkno; 5660 if (oldblkno && oldblkno != newblkno) 5661 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn); 5662 else 5663 freefrag = NULL; 5664 5665 ACQUIRE_LOCK(ip->i_ump); 5666 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0) 5667 panic("softdep_setup_allocext: lost block"); 5668 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5669 ("softdep_setup_allocext: newblk already initialized")); 5670 /* 5671 * Convert the newblk to an allocdirect. 5672 */ 5673 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT); 5674 adp = (struct allocdirect *)newblk; 5675 newblk->nb_freefrag = freefrag; 5676 adp->ad_offset = off; 5677 adp->ad_oldblkno = oldblkno; 5678 adp->ad_newsize = newsize; 5679 adp->ad_oldsize = oldsize; 5680 adp->ad_state |= EXTDATA; 5681 5682 /* 5683 * Finish initializing the journal.
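 * The jnewblk, if any, was created when the block itself was
 * allocated; here it is bound to the owning inode and logical block
 * and queued for the journal, along with any jfreefrag covering the
 * fragment being replaced.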
5684 */ 5685 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5686 jnewblk->jn_ino = ip->i_number; 5687 jnewblk->jn_lbn = lbn; 5688 add_to_journal(&jnewblk->jn_list); 5689 } 5690 if (freefrag && freefrag->ff_jdep != NULL && 5691 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5692 add_to_journal(freefrag->ff_jdep); 5693 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 5694 adp->ad_inodedep = inodedep; 5695 5696 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5697 /* 5698 * The list of allocdirects must be kept in sorted and ascending 5699 * order so that the rollback routines can quickly determine the 5700 * first uncommitted block (the size of the file stored on disk 5701 * ends at the end of the lowest committed fragment, or if there 5702 * are no fragments, at the end of the highest committed block). 5703 * Since files generally grow, the typical case is that the new 5704 * block is to be added at the end of the list. We speed this 5705 * special case by checking against the last allocdirect in the 5706 * list before laboriously traversing the list looking for the 5707 * insertion point. 5708 */ 5709 adphead = &inodedep->id_newextupdt; 5710 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5711 if (oldadp == NULL || oldadp->ad_offset <= off) { 5712 /* insert at end of list */ 5713 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5714 if (oldadp != NULL && oldadp->ad_offset == off) 5715 allocdirect_merge(adphead, adp, oldadp); 5716 FREE_LOCK(ip->i_ump); 5717 return; 5718 } 5719 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5720 if (oldadp->ad_offset >= off) 5721 break; 5722 } 5723 if (oldadp == NULL) 5724 panic("softdep_setup_allocext: lost entry"); 5725 /* insert in middle of list */ 5726 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5727 if (oldadp->ad_offset == off) 5728 allocdirect_merge(adphead, adp, oldadp); 5729 FREE_LOCK(ip->i_ump); 5730} 5731 5732/* 5733 * Indirect block allocation dependencies. 5734 * 5735 * The same dependencies that exist for a direct block also exist when 5736 * a new block is allocated and pointed to by an entry in a block of 5737 * indirect pointers. The undo/redo states described above are also 5738 * used here. Because an indirect block contains many pointers that 5739 * may have dependencies, a second copy of the entire in-memory indirect 5740 * block is kept. The buffer cache copy is always completely up-to-date. 5741 * The second copy, which is used only as a source for disk writes, 5742 * contains only the safe pointers (i.e., those that have no remaining 5743 * update dependencies). The second copy is freed when all pointers 5744 * are safe. The cache is not allowed to replace indirect blocks with 5745 * pending update dependencies. If a buffer containing an indirect 5746 * block with dependencies is written, these routines will mark it 5747 * dirty again. It can only be successfully written once all the 5748 * dependencies are removed. The ffs_fsync routine works in conjunction 5749 * with softdep_sync_metadata to get all the dependencies 5750 * removed so that a file can be successfully written to disk. Three 5751 * procedures are used when setting up indirect block pointer 5752 * dependencies. The division is necessary because of the organization 5753 * of the "balloc" routine and because of the distinction between file 5754 * pages and file metadata blocks. 5755 */ 5756 5757/* 5758 * Allocate a new allocindir structure.
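 *
 * The allocindir returned here is only partially initialized: the
 * caller links it onto the buffer's dependency list and then finishes
 * the job with setup_allocindir_phase2(), which attaches it to the
 * indirdep for the indirect block itself.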
5759 */ 5760static struct allocindir * 5761newallocindir(ip, ptrno, newblkno, oldblkno, lbn) 5762 struct inode *ip; /* inode for file being extended */ 5763 int ptrno; /* offset of pointer in indirect block */ 5764 ufs2_daddr_t newblkno; /* disk block number being added */ 5765 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5766 ufs_lbn_t lbn; 5767{ 5768 struct newblk *newblk; 5769 struct allocindir *aip; 5770 struct freefrag *freefrag; 5771 struct jnewblk *jnewblk; 5772 5773 if (oldblkno) 5774 freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn); 5775 else 5776 freefrag = NULL; 5777 ACQUIRE_LOCK(ip->i_ump); 5778 if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0) 5779 panic("newallocindir: lost block"); 5780 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5781 ("newallocindir: newblk already initialized")); 5782 WORKITEM_REASSIGN(newblk, D_ALLOCINDIR); 5783 newblk->nb_freefrag = freefrag; 5784 aip = (struct allocindir *)newblk; 5785 aip->ai_offset = ptrno; 5786 aip->ai_oldblkno = oldblkno; 5787 aip->ai_lbn = lbn; 5788 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5789 jnewblk->jn_ino = ip->i_number; 5790 jnewblk->jn_lbn = lbn; 5791 add_to_journal(&jnewblk->jn_list); 5792 } 5793 if (freefrag && freefrag->ff_jdep != NULL && 5794 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5795 add_to_journal(freefrag->ff_jdep); 5796 return (aip); 5797} 5798 5799/* 5800 * Called just before setting an indirect block pointer 5801 * to a newly allocated file page. 5802 */ 5803void 5804softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) 5805 struct inode *ip; /* inode for file being extended */ 5806 ufs_lbn_t lbn; /* allocated block number within file */ 5807 struct buf *bp; /* buffer with indirect blk referencing page */ 5808 int ptrno; /* offset of pointer in indirect block */ 5809 ufs2_daddr_t newblkno; /* disk block number being added */ 5810 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5811 struct buf *nbp; /* buffer holding allocated page */ 5812{ 5813 struct inodedep *inodedep; 5814 struct freefrag *freefrag; 5815 struct allocindir *aip; 5816 struct pagedep *pagedep; 5817 struct mount *mp; 5818 5819 mp = UFSTOVFS(ip->i_ump); 5820 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5821 ("softdep_setup_allocindir_page called on non-softdep filesystem")); 5822 KASSERT(lbn == nbp->b_lblkno, 5823 ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd", 5824 lbn, nbp->b_lblkno)); 5825 CTR4(KTR_SUJ, 5826 "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd " 5827 "lbn %jd", ip->i_number, newblkno, oldblkno, lbn); 5828 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page"); 5829 aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn); 5830 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 5831 /* 5832 * If we are allocating a directory page, then we must 5833 * allocate an associated pagedep to track additions and 5834 * deletions. 5835 */ 5836 if ((ip->i_mode & IFMT) == IFDIR) 5837 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); 5838 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5839 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); 5840 FREE_LOCK(ip->i_ump); 5841 if (freefrag) 5842 handle_workitem_freefrag(freefrag); 5843} 5844 5845/* 5846 * Called just before setting an indirect block pointer to a 5847 * newly allocated indirect block.
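 *
 * (ffs_balloc() is the expected caller: it uses this routine when it
 * adds a new level of indirection, and softdep_setup_allocindir_page()
 * above when a data page is mapped through an existing indirect.)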
5848 */ 5849void 5850softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 5851 struct buf *nbp; /* newly allocated indirect block */ 5852 struct inode *ip; /* inode for file being extended */ 5853 struct buf *bp; /* indirect block referencing allocated block */ 5854 int ptrno; /* offset of pointer in indirect block */ 5855 ufs2_daddr_t newblkno; /* disk block number being added */ 5856{ 5857 struct inodedep *inodedep; 5858 struct allocindir *aip; 5859 ufs_lbn_t lbn; 5860 5861 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 5862 ("softdep_setup_allocindir_meta called on non-softdep filesystem")); 5863 CTR3(KTR_SUJ, 5864 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", 5865 ip->i_number, newblkno, ptrno); 5866 lbn = nbp->b_lblkno; 5867 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); 5868 aip = newallocindir(ip, ptrno, newblkno, 0, lbn); 5869 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC, 5870 &inodedep); 5871 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5872 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) 5873 panic("softdep_setup_allocindir_meta: Block already existed"); 5874 FREE_LOCK(ip->i_ump); 5875} 5876 5877static void 5878indirdep_complete(indirdep) 5879 struct indirdep *indirdep; 5880{ 5881 struct allocindir *aip; 5882 5883 LIST_REMOVE(indirdep, ir_next); 5884 indirdep->ir_state |= DEPCOMPLETE; 5885 5886 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { 5887 LIST_REMOVE(aip, ai_next); 5888 free_newblk(&aip->ai_block); 5889 } 5890 /* 5891 * If this indirdep is not attached to a buf it was simply waiting 5892 * on completion to clear completehd. free_indirdep() asserts 5893 * that nothing is dangling. 5894 */ 5895 if ((indirdep->ir_state & ONWORKLIST) == 0) 5896 free_indirdep(indirdep); 5897} 5898 5899static struct indirdep * 5900indirdep_lookup(mp, ip, bp) 5901 struct mount *mp; 5902 struct inode *ip; 5903 struct buf *bp; 5904{ 5905 struct indirdep *indirdep, *newindirdep; 5906 struct newblk *newblk; 5907 struct ufsmount *ump; 5908 struct worklist *wk; 5909 struct fs *fs; 5910 ufs2_daddr_t blkno; 5911 5912 ump = VFSTOUFS(mp); 5913 LOCK_OWNED(ump); 5914 indirdep = NULL; 5915 newindirdep = NULL; 5916 fs = ip->i_fs; 5917 for (;;) { 5918 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5919 if (wk->wk_type != D_INDIRDEP) 5920 continue; 5921 indirdep = WK_INDIRDEP(wk); 5922 break; 5923 } 5924 /* Found on the buffer worklist, no new structure to free. */ 5925 if (indirdep != NULL && newindirdep == NULL) 5926 return (indirdep); 5927 if (indirdep != NULL && newindirdep != NULL) 5928 panic("indirdep_lookup: simultaneous create"); 5929 /* None found on the buffer and a new structure is ready. */ 5930 if (indirdep == NULL && newindirdep != NULL) 5931 break; 5932 /* None found and no new structure available. 
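 * Drop the softdep lock, allocate a candidate indirdep, and retry
 * the lookup from the top of the loop with the lock re-held.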
*/ 5933 FREE_LOCK(ump); 5934 newindirdep = malloc(sizeof(struct indirdep), 5935 M_INDIRDEP, M_SOFTDEP_FLAGS); 5936 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); 5937 newindirdep->ir_state = ATTACHED; 5938 if (ip->i_ump->um_fstype == UFS1) 5939 newindirdep->ir_state |= UFS1FMT; 5940 TAILQ_INIT(&newindirdep->ir_trunc); 5941 newindirdep->ir_saveddata = NULL; 5942 LIST_INIT(&newindirdep->ir_deplisthd); 5943 LIST_INIT(&newindirdep->ir_donehd); 5944 LIST_INIT(&newindirdep->ir_writehd); 5945 LIST_INIT(&newindirdep->ir_completehd); 5946 if (bp->b_blkno == bp->b_lblkno) { 5947 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 5948 NULL, NULL); 5949 bp->b_blkno = blkno; 5950 } 5951 newindirdep->ir_freeblks = NULL; 5952 newindirdep->ir_savebp = 5953 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); 5954 newindirdep->ir_bp = bp; 5955 BUF_KERNPROC(newindirdep->ir_savebp); 5956 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 5957 ACQUIRE_LOCK(ump); 5958 } 5959 indirdep = newindirdep; 5960 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 5961 /* 5962 * If the block is not yet allocated we don't set DEPCOMPLETE so 5963 * that we don't free dependencies until the pointers are valid. 5964 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather 5965 * than using the hash. 5966 */ 5967 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) 5968 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); 5969 else 5970 indirdep->ir_state |= DEPCOMPLETE; 5971 return (indirdep); 5972} 5973 5974/* 5975 * Called to finish the allocation of the "aip" allocated 5976 * by one of the two routines above. 5977 */ 5978static struct freefrag * 5979setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) 5980 struct buf *bp; /* in-memory copy of the indirect block */ 5981 struct inode *ip; /* inode for file being extended */ 5982 struct inodedep *inodedep; /* Inodedep for ip */ 5983 struct allocindir *aip; /* allocindir allocated by the above routines */ 5984 ufs_lbn_t lbn; /* Logical block number for this block. */ 5985{ 5986 struct fs *fs; 5987 struct indirdep *indirdep; 5988 struct allocindir *oldaip; 5989 struct freefrag *freefrag; 5990 struct mount *mp; 5991 5992 LOCK_OWNED(ip->i_ump); 5993 mp = UFSTOVFS(ip->i_ump); 5994 fs = ip->i_fs; 5995 if (bp->b_lblkno >= 0) 5996 panic("setup_allocindir_phase2: not indir blk"); 5997 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), 5998 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); 5999 indirdep = indirdep_lookup(mp, ip, bp); 6000 KASSERT(indirdep->ir_savebp != NULL, 6001 ("setup_allocindir_phase2 NULL ir_savebp")); 6002 aip->ai_indirdep = indirdep; 6003 /* 6004 * Check for an unwritten dependency for this indirect offset. If 6005 * there is, merge the old dependency into the new one. This happens 6006 * as a result of reallocblk only. 6007 */ 6008 freefrag = NULL; 6009 if (aip->ai_oldblkno != 0) { 6010 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { 6011 if (oldaip->ai_offset == aip->ai_offset) { 6012 freefrag = allocindir_merge(aip, oldaip); 6013 goto done; 6014 } 6015 } 6016 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { 6017 if (oldaip->ai_offset == aip->ai_offset) { 6018 freefrag = allocindir_merge(aip, oldaip); 6019 goto done; 6020 } 6021 } 6022 } 6023done: 6024 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 6025 return (freefrag); 6026} 6027 6028/* 6029 * Merge two allocindirs which refer to the same block. Move newblock 6030 * dependencies and setup the freefrags appropriately. 
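 *
 * This case arises only from ffs_reallocblks(): the same slot in the
 * indirect is written a second time before the first allocation has
 * been committed, so the older allocindir's obligations are folded
 * into the newer one and the superseded block is released through
 * the freefrag.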
6031 */ 6032static struct freefrag * 6033allocindir_merge(aip, oldaip) 6034 struct allocindir *aip; 6035 struct allocindir *oldaip; 6036{ 6037 struct freefrag *freefrag; 6038 struct worklist *wk; 6039 6040 if (oldaip->ai_newblkno != aip->ai_oldblkno) 6041 panic("allocindir_merge: blkno"); 6042 aip->ai_oldblkno = oldaip->ai_oldblkno; 6043 freefrag = aip->ai_freefrag; 6044 aip->ai_freefrag = oldaip->ai_freefrag; 6045 oldaip->ai_freefrag = NULL; 6046 KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag")); 6047 /* 6048 * If we are tracking a new directory-block allocation, 6049 * move it from the old allocindir to the new allocindir. 6050 */ 6051 if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) { 6052 WORKLIST_REMOVE(wk); 6053 if (!LIST_EMPTY(&oldaip->ai_newdirblk)) 6054 panic("allocindir_merge: extra newdirblk"); 6055 WORKLIST_INSERT(&aip->ai_newdirblk, wk); 6056 } 6057 /* 6058 * We can skip journaling for this freefrag and just complete 6059 * any pending journal work for the allocindir that is being 6060 * removed after the freefrag completes. 6061 */ 6062 if (freefrag->ff_jdep) 6063 cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep)); 6064 LIST_REMOVE(oldaip, ai_next); 6065 freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block, 6066 &freefrag->ff_list, &freefrag->ff_jwork); 6067 free_newblk(&oldaip->ai_block); 6068 6069 return (freefrag); 6070} 6071 6072static inline void 6073setup_freedirect(freeblks, ip, i, needj) 6074 struct freeblks *freeblks; 6075 struct inode *ip; 6076 int i; 6077 int needj; 6078{ 6079 ufs2_daddr_t blkno; 6080 int frags; 6081 6082 blkno = DIP(ip, i_db[i]); 6083 if (blkno == 0) 6084 return; 6085 DIP_SET(ip, i_db[i], 0); 6086 frags = sblksize(ip->i_fs, ip->i_size, i); 6087 frags = numfrags(ip->i_fs, frags); 6088 newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj); 6089} 6090 6091static inline void 6092setup_freeext(freeblks, ip, i, needj) 6093 struct freeblks *freeblks; 6094 struct inode *ip; 6095 int i; 6096 int needj; 6097{ 6098 ufs2_daddr_t blkno; 6099 int frags; 6100 6101 blkno = ip->i_din2->di_extb[i]; 6102 if (blkno == 0) 6103 return; 6104 ip->i_din2->di_extb[i] = 0; 6105 frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i); 6106 frags = numfrags(ip->i_fs, frags); 6107 newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj); 6108} 6109 6110static inline void 6111setup_freeindir(freeblks, ip, i, lbn, needj) 6112 struct freeblks *freeblks; 6113 struct inode *ip; 6114 int i; 6115 ufs_lbn_t lbn; 6116 int needj; 6117{ 6118 ufs2_daddr_t blkno; 6119 6120 blkno = DIP(ip, i_ib[i]); 6121 if (blkno == 0) 6122 return; 6123 DIP_SET(ip, i_ib[i], 0); 6124 newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag, 6125 0, needj); 6126} 6127 6128static inline struct freeblks * 6129newfreeblks(mp, ip) 6130 struct mount *mp; 6131 struct inode *ip; 6132{ 6133 struct freeblks *freeblks; 6134 6135 freeblks = malloc(sizeof(struct freeblks), 6136 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 6137 workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp); 6138 LIST_INIT(&freeblks->fb_jblkdephd); 6139 LIST_INIT(&freeblks->fb_jwork); 6140 freeblks->fb_ref = 0; 6141 freeblks->fb_cgwait = 0; 6142 freeblks->fb_state = ATTACHED; 6143 freeblks->fb_uid = ip->i_uid; 6144 freeblks->fb_inum = ip->i_number; 6145 freeblks->fb_vtype = ITOV(ip)->v_type; 6146 freeblks->fb_modrev = DIP(ip, i_modrev); 6147 freeblks->fb_devvp = ip->i_devvp; 6148 freeblks->fb_chkcnt = 0; 6149 freeblks->fb_len = 0; 6150 6151 return (freeblks); 6152} 6153 6154static
void 6155trunc_indirdep(indirdep, freeblks, bp, off) 6156 struct indirdep *indirdep; 6157 struct freeblks *freeblks; 6158 struct buf *bp; 6159 int off; 6160{ 6161 struct allocindir *aip, *aipn; 6162 6163 /* 6164 * The first set of allocindirs won't be in savedbp. 6165 */ 6166 LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn) 6167 if (aip->ai_offset > off) 6168 cancel_allocindir(aip, bp, freeblks, 1); 6169 LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn) 6170 if (aip->ai_offset > off) 6171 cancel_allocindir(aip, bp, freeblks, 1); 6172 /* 6173 * These will exist in savedbp. 6174 */ 6175 LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn) 6176 if (aip->ai_offset > off) 6177 cancel_allocindir(aip, NULL, freeblks, 0); 6178 LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn) 6179 if (aip->ai_offset > off) 6180 cancel_allocindir(aip, NULL, freeblks, 0); 6181} 6182 6183/* 6184 * Follow the chain of indirects down to lastlbn creating a freework 6185 * structure for each. This will be used to start indir_trunc() at 6186 * the right offset and create the journal records for the partial 6187 * truncation. A second step will handle the truncated dependencies. 6188 */ 6189static int 6190setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno) 6191 struct freeblks *freeblks; 6192 struct inode *ip; 6193 ufs_lbn_t lbn; 6194 ufs_lbn_t lastlbn; 6195 ufs2_daddr_t blkno; 6196{ 6197 struct indirdep *indirdep; 6198 struct indirdep *indirn; 6199 struct freework *freework; 6200 struct newblk *newblk; 6201 struct mount *mp; 6202 struct buf *bp; 6203 uint8_t *start; 6204 uint8_t *end; 6205 ufs_lbn_t lbnadd; 6206 int level; 6207 int error; 6208 int off; 6209 6210 6211 freework = NULL; 6212 if (blkno == 0) 6213 return (0); 6214 mp = freeblks->fb_list.wk_mp; 6215 bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0); 6216 if ((bp->b_flags & B_CACHE) == 0) { 6217 bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno); 6218 bp->b_iocmd = BIO_READ; 6219 bp->b_flags &= ~B_INVAL; 6220 bp->b_ioflags &= ~BIO_ERROR; 6221 vfs_busy_pages(bp, 0); 6222 bp->b_iooffset = dbtob(bp->b_blkno); 6223 bstrategy(bp); 6224 curthread->td_ru.ru_inblock++; 6225 error = bufwait(bp); 6226 if (error) { 6227 brelse(bp); 6228 return (error); 6229 } 6230 } 6231 level = lbn_level(lbn); 6232 lbnadd = lbn_offset(ip->i_fs, level); 6233 /* 6234 * Compute the offset of the last block we want to keep. Store 6235 * in the freework the first block we want to completely free. 6236 */ 6237 off = (lastlbn - -(lbn + level)) / lbnadd; 6238 if (off + 1 == NINDIR(ip->i_fs)) 6239 goto nowork; 6240 freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1, 6241 0); 6242 /* 6243 * Link the freework into the indirdep. This will prevent any new 6244 * allocations from proceeding until we are finished with the 6245 * truncate and the block is written. 6246 */ 6247 ACQUIRE_LOCK(ip->i_ump); 6248 indirdep = indirdep_lookup(mp, ip, bp); 6249 if (indirdep->ir_freeblks) 6250 panic("setup_trunc_indir: indirdep already truncated."); 6251 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next); 6252 freework->fw_indir = indirdep; 6253 /* 6254 * Cancel any allocindirs that will not make it to disk. 6255 * We have to do this for all copies of the indirdep that 6256 * live on this newblk.
6257 */ 6258 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 6259 newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk); 6260 LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next) 6261 trunc_indirdep(indirn, freeblks, bp, off); 6262 } else 6263 trunc_indirdep(indirdep, freeblks, bp, off); 6264 FREE_LOCK(ip->i_ump); 6265 /* 6266 * Creation is protected by the buf lock. The saveddata is only 6267 * needed if a full truncation follows a partial truncation but it 6268 * is difficult to allocate in that case so we fetch it anyway. 6269 */ 6270 if (indirdep->ir_saveddata == NULL) 6271 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 6272 M_SOFTDEP_FLAGS); 6273nowork: 6274 /* Fetch the blkno of the child and the zero start offset. */ 6275 if (ip->i_ump->um_fstype == UFS1) { 6276 blkno = ((ufs1_daddr_t *)bp->b_data)[off]; 6277 start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1]; 6278 } else { 6279 blkno = ((ufs2_daddr_t *)bp->b_data)[off]; 6280 start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1]; 6281 } 6282 if (freework) { 6283 /* Zero the truncated pointers. */ 6284 end = bp->b_data + bp->b_bcount; 6285 bzero(start, end - start); 6286 bdwrite(bp); 6287 } else 6288 bqrelse(bp); 6289 if (level == 0) 6290 return (0); 6291 lbn++; /* adjust level */ 6292 lbn -= (off * lbnadd); 6293 return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno); 6294} 6295 6296/* 6297 * Complete the partial truncation of an indirect block set up by 6298 * setup_trunc_indir(). This zeros the truncated pointers in the saved 6299 * copy and writes them to disk before the freeblks is allowed to complete. 6300 */ 6301static void 6302complete_trunc_indir(freework) 6303 struct freework *freework; 6304{ 6305 struct freework *fwn; 6306 struct indirdep *indirdep; 6307 struct ufsmount *ump; 6308 struct buf *bp; 6309 uintptr_t start; 6310 int count; 6311 6312 ump = VFSTOUFS(freework->fw_list.wk_mp); 6313 LOCK_OWNED(ump); 6314 indirdep = freework->fw_indir; 6315 for (;;) { 6316 bp = indirdep->ir_bp; 6317 /* See if the block was discarded. */ 6318 if (bp == NULL) 6319 break; 6320 /* Inline part of getdirtybuf(). We don't want bremfree. */ 6321 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) 6322 break; 6323 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 6324 LOCK_PTR(ump)) == 0) 6325 BUF_UNLOCK(bp); 6326 ACQUIRE_LOCK(ump); 6327 } 6328 freework->fw_state |= DEPCOMPLETE; 6329 TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next); 6330 /* 6331 * Zero the pointers in the saved copy. 6332 */ 6333 if (indirdep->ir_state & UFS1FMT) 6334 start = sizeof(ufs1_daddr_t); 6335 else 6336 start = sizeof(ufs2_daddr_t); 6337 start *= freework->fw_start; 6338 count = indirdep->ir_savebp->b_bcount - start; 6339 start += (uintptr_t)indirdep->ir_savebp->b_data; 6340 bzero((char *)start, count); 6341 /* 6342 * We need to start the next truncation in the list if it has not 6343 * been started yet. 6344 */ 6345 fwn = TAILQ_FIRST(&indirdep->ir_trunc); 6346 if (fwn != NULL) { 6347 if (fwn->fw_freeblks == indirdep->ir_freeblks) 6348 TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next); 6349 if ((fwn->fw_state & ONWORKLIST) == 0) 6350 freework_enqueue(fwn); 6351 } 6352 /* 6353 * If bp is NULL the block was fully truncated, restore 6354 * the saved block list; otherwise free it if it is no 6355 * longer needed.
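 *
 * (For scale: with UFS2 and 8192-byte blocks the saved indirect holds
 * 8192 / sizeof(ufs2_daddr_t) == 1024 pointers, so a freework with
 * fw_start == 512 has just had the upper 4096 bytes of the saved copy
 * zeroed above while the lower half remains authoritative.)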
6356 */ 6357 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 6358 if (bp == NULL) 6359 bcopy(indirdep->ir_saveddata, 6360 indirdep->ir_savebp->b_data, 6361 indirdep->ir_savebp->b_bcount); 6362 free(indirdep->ir_saveddata, M_INDIRDEP); 6363 indirdep->ir_saveddata = NULL; 6364 } 6365 /* 6366 * When bp is NULL there is a full truncation pending. We 6367 * must wait for this full truncation to be journaled before 6368 * we can release this freework because the disk pointers will 6369 * never be written as zero. 6370 */ 6371 if (bp == NULL) { 6372 if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd)) 6373 handle_written_freework(freework); 6374 else 6375 WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd, 6376 &freework->fw_list); 6377 } else { 6378 /* Complete when the real copy is written. */ 6379 WORKLIST_INSERT(&bp->b_dep, &freework->fw_list); 6380 BUF_UNLOCK(bp); 6381 } 6382} 6383 6384/* 6385 * Calculate the number of blocks we are going to release where datablocks 6386 * is the current total and length is the new file size. 6387 */ 6388static ufs2_daddr_t 6389blkcount(fs, datablocks, length) 6390 struct fs *fs; 6391 ufs2_daddr_t datablocks; 6392 off_t length; 6393{ 6394 off_t totblks, numblks; 6395 6396 totblks = 0; 6397 numblks = howmany(length, fs->fs_bsize); 6398 if (numblks <= NDADDR) { 6399 totblks = howmany(length, fs->fs_fsize); 6400 goto out; 6401 } 6402 totblks = blkstofrags(fs, numblks); 6403 numblks -= NDADDR; 6404 /* 6405 * Count all single, then double, then triple indirects required. 6406 * Subtracting one indirect's worth of blocks for each pass 6407 * acknowledges one of each pointed to by the inode. 6408 */ 6409 for (;;) { 6410 totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs))); 6411 numblks -= NINDIR(fs); 6412 if (numblks <= 0) 6413 break; 6414 numblks = howmany(numblks, NINDIR(fs)); 6415 } 6416out: 6417 totblks = fsbtodb(fs, totblks); 6418 /* 6419 * Handle sparse files. We can't reclaim more blocks than the inode 6420 * references. We will correct it later in handle_complete_freeblks() 6421 * when we know the real count. 6422 */ 6423 if (totblks > datablocks) 6424 return (0); 6425 return (datablocks - totblks); 6426} 6427 6428/* 6429 * Handle freeblocks for journaled softupdate filesystems. 6430 * 6431 * Contrary to normal softupdates, we must preserve the block pointers in 6432 * indirects until their subordinates are free. This is to avoid journaling 6433 * every block that is freed which may consume more space than the journal 6434 * itself. The recovery program will see the free block journals at the 6435 * base of the truncated area and traverse them to reclaim space. The 6436 * pointers in the inode may be cleared immediately after the journal 6437 * records are written because each direct and indirect pointer in the 6438 * inode is recorded in a journal. This permits full truncation to proceed 6439 * asynchronously. The write order is journal -> inode -> cgs -> indirects. 6440 * 6441 * The algorithm is as follows: 6442 * 1) Traverse the in-memory state and create journal entries to release 6443 * the relevant blocks and full indirect trees. 6444 * 2) Traverse the indirect block chain adding partial truncation freework 6445 * records to indirects in the path to lastlbn. The freework will 6446 * prevent new allocation dependencies from being satisfied in this 6447 * indirect until the truncation completes. 6448 * 3) Read and lock the inode block, performing an update with the new size 6449 * and pointers.
This prevents truncated data from becoming valid on 6450 * disk through step 4. 6451 * 4) Reap unsatisfied dependencies that are beyond the truncated area, 6452 * eliminate journal work for those records that do not require it. 6453 * 5) Schedule the journal records to be written followed by the inode block. 6454 * 6) Allocate any necessary frags for the end of file. 6455 * 7) Zero any partially truncated blocks. 6456 * 6457 * From this point truncation proceeds asynchronously using the freework and 6458 * indir_trunc machinery. The file will not be extended again into a 6459 * partially truncated indirect block until all work is completed but 6460 * the normal dependency mechanism ensures that it is rolled back/forward 6461 * as appropriate. Further truncation may occur without delay and is 6462 * serialized in indir_trunc(). 6463 */ 6464void 6465softdep_journal_freeblocks(ip, cred, length, flags) 6466 struct inode *ip; /* The inode whose length is to be reduced */ 6467 struct ucred *cred; 6468 off_t length; /* The new length for the file */ 6469 int flags; /* IO_EXT and/or IO_NORMAL */ 6470{ 6471 struct freeblks *freeblks, *fbn; 6472 struct worklist *wk, *wkn; 6473 struct inodedep *inodedep; 6474 struct jblkdep *jblkdep; 6475 struct allocdirect *adp, *adpn; 6476 struct ufsmount *ump; 6477 struct fs *fs; 6478 struct buf *bp; 6479 struct vnode *vp; 6480 struct mount *mp; 6481 ufs2_daddr_t extblocks, datablocks; 6482 ufs_lbn_t tmpval, lbn, lastlbn; 6483 int frags, lastoff, iboff, allocblock, needj, error, i; 6484 6485 fs = ip->i_fs; 6486 ump = ip->i_ump; 6487 mp = UFSTOVFS(ump); 6488 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 6489 ("softdep_journal_freeblocks called on non-softdep filesystem")); 6490 vp = ITOV(ip); 6491 needj = 1; 6492 iboff = -1; 6493 allocblock = 0; 6494 extblocks = 0; 6495 datablocks = 0; 6496 frags = 0; 6497 freeblks = newfreeblks(mp, ip); 6498 ACQUIRE_LOCK(ump); 6499 /* 6500 * If we're truncating a removed file that will never be written 6501 * we don't need to journal the block frees. The canceled journals 6502 * for the allocations will suffice. 6503 */ 6504 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 6505 if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED && 6506 length == 0) 6507 needj = 0; 6508 CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d", 6509 ip->i_number, length, needj); 6510 FREE_LOCK(ump); 6511 /* 6512 * Calculate the lbn that we are truncating to. This results in -1 6513 * if we're truncating to 0 bytes. So it is the last lbn we want 6514 * to keep, not the first lbn we want to truncate. 6515 */ 6516 lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1; 6517 lastoff = blkoff(fs, length); 6518 /* 6519 * Compute frags we are keeping in lastlbn. 0 means all. 6520 */ 6521 if (lastlbn >= 0 && lastlbn < NDADDR) { 6522 frags = fragroundup(fs, lastoff); 6523 /* adp offset of last valid allocdirect. */ 6524 iboff = lastlbn; 6525 } else if (lastlbn > 0) 6526 iboff = NDADDR; 6527 if (fs->fs_magic == FS_UFS2_MAGIC) 6528 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6529 /* 6530 * Handle normal data blocks and indirects. This section saves 6531 * values used after the inode update to complete frag and indirect 6532 * truncation. 6533 */ 6534 if ((flags & IO_NORMAL) != 0) { 6535 /* 6536 * Handle truncation of whole direct and indirect blocks.
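 *
 * For example, with UFS2 and 16384-byte blocks, NINDIR(fs) == 2048:
 * the i == 0 pass covers lbns NDADDR through NDADDR + 2048 - 1 (the
 * single indirect), i == 1 the next 2048^2 lbns (double), and i == 2
 * the next 2048^3 (triple). A range lying wholly beyond lastlbn is
 * released in full; the range containing lastlbn is partially
 * truncated via setup_trunc_indir().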
6537 */ 6538 for (i = iboff + 1; i < NDADDR; i++) 6539 setup_freedirect(freeblks, ip, i, needj); 6540 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6541 i++, lbn += tmpval, tmpval *= NINDIR(fs)) { 6542 /* Release a whole indirect tree. */ 6543 if (lbn > lastlbn) { 6544 setup_freeindir(freeblks, ip, i, -lbn -i, 6545 needj); 6546 continue; 6547 } 6548 iboff = i + NDADDR; 6549 /* 6550 * Traverse partially truncated indirect tree. 6551 */ 6552 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) 6553 setup_trunc_indir(freeblks, ip, -lbn - i, 6554 lastlbn, DIP(ip, i_ib[i])); 6555 } 6556 /* 6557 * Handle partial truncation to a frag boundary. 6558 */ 6559 if (frags) { 6560 ufs2_daddr_t blkno; 6561 long oldfrags; 6562 6563 oldfrags = blksize(fs, ip, lastlbn); 6564 blkno = DIP(ip, i_db[lastlbn]); 6565 if (blkno && oldfrags != frags) { 6566 oldfrags -= frags; 6567 oldfrags = numfrags(ip->i_fs, oldfrags); 6568 blkno += numfrags(ip->i_fs, frags); 6569 newfreework(ump, freeblks, NULL, lastlbn, 6570 blkno, oldfrags, 0, needj); 6571 if (needj) 6572 adjust_newfreework(freeblks, 6573 numfrags(ip->i_fs, frags)); 6574 } else if (blkno == 0) 6575 allocblock = 1; 6576 } 6577 /* 6578 * Add a journal record for partial truncate if we are 6579 * handling indirect blocks. Non-indirects need no extra 6580 * journaling. 6581 */ 6582 if (length != 0 && lastlbn >= NDADDR) { 6583 ip->i_flag |= IN_TRUNCATED; 6584 newjtrunc(freeblks, length, 0); 6585 } 6586 ip->i_size = length; 6587 DIP_SET(ip, i_size, ip->i_size); 6588 datablocks = DIP(ip, i_blocks) - extblocks; 6589 if (length != 0) 6590 datablocks = blkcount(ip->i_fs, datablocks, length); 6591 freeblks->fb_len = length; 6592 } 6593 if ((flags & IO_EXT) != 0) { 6594 for (i = 0; i < NXADDR; i++) 6595 setup_freeext(freeblks, ip, i, needj); 6596 ip->i_din2->di_extsize = 0; 6597 datablocks += extblocks; 6598 } 6599#ifdef QUOTA 6600 /* Reference the quotas in case the block count is wrong in the end. */ 6601 quotaref(vp, freeblks->fb_quota); 6602 (void) chkdq(ip, -datablocks, NOCRED, 0); 6603#endif 6604 freeblks->fb_chkcnt = -datablocks; 6605 UFS_LOCK(ump); 6606 fs->fs_pendingblocks += datablocks; 6607 UFS_UNLOCK(ump); 6608 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6609 /* 6610 * Handle truncation of incomplete alloc direct dependencies. We 6611 * hold the inode block locked to prevent incomplete dependencies 6612 * from reaching the disk while we are eliminating those that 6613 * have been truncated. This is a partially inlined ffs_update(). 6614 */ 6615 ufs_itimes(vp); 6616 ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); 6617 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6618 (int)fs->fs_bsize, cred, &bp); 6619 if (error) { 6620 brelse(bp); 6621 softdep_error("softdep_journal_freeblocks", error); 6622 return; 6623 } 6624 if (bp->b_bufsize == fs->fs_bsize) 6625 bp->b_flags |= B_CLUSTEROK; 6626 softdep_update_inodeblock(ip, bp, 0); 6627 if (ump->um_fstype == UFS1) 6628 *((struct ufs1_dinode *)bp->b_data + 6629 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 6630 else 6631 *((struct ufs2_dinode *)bp->b_data + 6632 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 6633 ACQUIRE_LOCK(ump); 6634 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 6635 if ((inodedep->id_state & IOSTARTED) != 0) 6636 panic("softdep_setup_freeblocks: inode busy"); 6637 /* 6638 * Add the freeblks structure to the list of operations that 6639 * must await the zero'ed inode being written to disk. 
If we 6640 * still have a bitmap dependency (needj == 0), then the inode 6641 * has never been written to disk, so we can process the 6642 * freeblks below once we have deleted the dependencies. 6643 */ 6644 if (needj) 6645 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6646 else 6647 freeblks->fb_state |= COMPLETE; 6648 if ((flags & IO_NORMAL) != 0) { 6649 TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) { 6650 if (adp->ad_offset > iboff) 6651 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6652 freeblks); 6653 /* 6654 * Truncate the allocdirect. We could eliminate 6655 * or modify journal records as well. 6656 */ 6657 else if (adp->ad_offset == iboff && frags) 6658 adp->ad_newsize = frags; 6659 } 6660 } 6661 if ((flags & IO_EXT) != 0) 6662 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6663 cancel_allocdirect(&inodedep->id_extupdt, adp, 6664 freeblks); 6665 /* 6666 * Scan the bufwait list for newblock dependencies that will never 6667 * make it to disk. 6668 */ 6669 LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) { 6670 if (wk->wk_type != D_ALLOCDIRECT) 6671 continue; 6672 adp = WK_ALLOCDIRECT(wk); 6673 if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) || 6674 ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) { 6675 cancel_jfreeblk(freeblks, adp->ad_newblkno); 6676 cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork); 6677 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 6678 } 6679 } 6680 /* 6681 * Add journal work. 6682 */ 6683 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) 6684 add_to_journal(&jblkdep->jb_list); 6685 FREE_LOCK(ump); 6686 bdwrite(bp); 6687 /* 6688 * Truncate dependency structures beyond length. 6689 */ 6690 trunc_dependencies(ip, freeblks, lastlbn, frags, flags); 6691 /* 6692 * This is only set when we need to allocate a fragment because 6693 * none existed at the end of a frag-sized file. It handles only 6694 * allocating a new, zero filled block. 6695 */ 6696 if (allocblock) { 6697 ip->i_size = length - lastoff; 6698 DIP_SET(ip, i_size, ip->i_size); 6699 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); 6700 if (error != 0) { 6701 softdep_error("softdep_journal_freeblks", error); 6702 return; 6703 } 6704 ip->i_size = length; 6705 DIP_SET(ip, i_size, length); 6706 ip->i_flag |= IN_CHANGE | IN_UPDATE; 6707 allocbuf(bp, frags); 6708 ffs_update(vp, 0); 6709 bawrite(bp); 6710 } else if (lastoff != 0 && vp->v_type != VDIR) { 6711 int size; 6712 6713 /* 6714 * Zero the end of a truncated frag or block. 6715 */ 6716 size = sblksize(fs, length, lastlbn); 6717 error = bread(vp, lastlbn, size, cred, &bp); 6718 if (error) { 6719 softdep_error("softdep_journal_freeblks", error); 6720 return; 6721 } 6722 bzero((char *)bp->b_data + lastoff, size - lastoff); 6723 bawrite(bp); 6724 6725 } 6726 ACQUIRE_LOCK(ump); 6727 inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 6728 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); 6729 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; 6730 /* 6731 * We zero earlier truncations so they don't erroneously 6732 * update i_blocks.
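 *
 * (A full truncation, fb_len == 0, supersedes any partial truncations
 * still in flight on this inode, so their recorded lengths are
 * cleared before the final block count adjustment is made.)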
6733 */ 6734 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) 6735 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) 6736 fbn->fb_len = 0; 6737 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && 6738 LIST_EMPTY(&freeblks->fb_jblkdephd)) 6739 freeblks->fb_state |= INPROGRESS; 6740 else 6741 freeblks = NULL; 6742 FREE_LOCK(ump); 6743 if (freeblks) 6744 handle_workitem_freeblocks(freeblks, 0); 6745 trunc_pages(ip, length, extblocks, flags); 6746 6747} 6748 6749/* 6750 * Flush a JOP_SYNC to the journal. 6751 */ 6752void 6753softdep_journal_fsync(ip) 6754 struct inode *ip; 6755{ 6756 struct jfsync *jfsync; 6757 6758 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 6759 ("softdep_journal_fsync called on non-softdep filesystem")); 6760 if ((ip->i_flag & IN_TRUNCATED) == 0) 6761 return; 6762 ip->i_flag &= ~IN_TRUNCATED; 6763 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); 6764 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump)); 6765 jfsync->jfs_size = ip->i_size; 6766 jfsync->jfs_ino = ip->i_number; 6767 ACQUIRE_LOCK(ip->i_ump); 6768 add_to_journal(&jfsync->jfs_list); 6769 jwait(&jfsync->jfs_list, MNT_WAIT); 6770 FREE_LOCK(ip->i_ump); 6771} 6772 6773/* 6774 * Block de-allocation dependencies. 6775 * 6776 * When blocks are de-allocated, the on-disk pointers must be nullified before 6777 * the blocks are made available for use by other files. (The true 6778 * requirement is that old pointers must be nullified before new on-disk 6779 * pointers are set. We chose this slightly more stringent requirement to 6780 * reduce complexity.) Our implementation handles this dependency by updating 6781 * the inode (or indirect block) appropriately but delaying the actual block 6782 * de-allocation (i.e., freemap and free space count manipulation) until 6783 * after the updated versions reach stable storage. After the disk is 6784 * updated, the blocks can be safely de-allocated whenever it is convenient. 6785 * This implementation handles only the common case of reducing a file's 6786 * length to zero. Other cases are handled by the conventional synchronous 6787 * write approach. 6788 * 6789 * The ffs implementation with which we worked double-checks 6790 * the state of the block pointers and file size as it reduces 6791 * a file's length. Some of this code is replicated here in our 6792 * soft updates implementation. The freeblks->fb_chkcnt field is 6793 * used to transfer a part of this information to the procedure 6794 * that eventually de-allocates the blocks. 6795 * 6796 * This routine should be called from the routine that shortens 6797 * a file's length, before the inode's size or block pointers 6798 * are modified. It will save the block pointer information for 6799 * later release and zero the inode so that the calling routine 6800 * can release it. 
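 *
 * A hedged sketch of the caller's side (illustration only; see
 * ffs_truncate() for the authoritative sequence, including the choice
 * between this routine and softdep_journal_freeblocks()):
 */
#if 0
	if (DOINGSOFTDEP(vp) && length == 0) {
		/*
		 * Record the pointers and zero the inode; the blocks
		 * themselves are freed only after the updated inode
		 * has reached stable storage.
		 */
		softdep_setup_freeblocks(ip, length, IO_NORMAL | IO_EXT);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		error = ffs_update(vp, 0);
	}
#endif
/*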
6801 */ 6802void 6803softdep_setup_freeblocks(ip, length, flags) 6804 struct inode *ip; /* The inode whose length is to be reduced */ 6805 off_t length; /* The new length for the file */ 6806 int flags; /* IO_EXT and/or IO_NORMAL */ 6807{ 6808 struct ufs1_dinode *dp1; 6809 struct ufs2_dinode *dp2; 6810 struct freeblks *freeblks; 6811 struct inodedep *inodedep; 6812 struct allocdirect *adp; 6813 struct ufsmount *ump; 6814 struct buf *bp; 6815 struct fs *fs; 6816 ufs2_daddr_t extblocks, datablocks; 6817 struct mount *mp; 6818 int i, delay, error; 6819 ufs_lbn_t tmpval; 6820 ufs_lbn_t lbn; 6821 6822 ump = ip->i_ump; 6823 mp = UFSTOVFS(ump); 6824 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 6825 ("softdep_setup_freeblocks called on non-softdep filesystem")); 6826 CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld", 6827 ip->i_number, length); 6828 KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length")); 6829 fs = ip->i_fs; 6830 freeblks = newfreeblks(mp, ip); 6831 extblocks = 0; 6832 datablocks = 0; 6833 if (fs->fs_magic == FS_UFS2_MAGIC) 6834 extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize)); 6835 if ((flags & IO_NORMAL) != 0) { 6836 for (i = 0; i < NDADDR; i++) 6837 setup_freedirect(freeblks, ip, i, 0); 6838 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6839 i++, lbn += tmpval, tmpval *= NINDIR(fs)) 6840 setup_freeindir(freeblks, ip, i, -lbn -i, 0); 6841 ip->i_size = 0; 6842 DIP_SET(ip, i_size, 0); 6843 datablocks = DIP(ip, i_blocks) - extblocks; 6844 } 6845 if ((flags & IO_EXT) != 0) { 6846 for (i = 0; i < NXADDR; i++) 6847 setup_freeext(freeblks, ip, i, 0); 6848 ip->i_din2->di_extsize = 0; 6849 datablocks += extblocks; 6850 } 6851#ifdef QUOTA 6852 /* Reference the quotas in case the block count is wrong in the end. */ 6853 quotaref(ITOV(ip), freeblks->fb_quota); 6854 (void) chkdq(ip, -datablocks, NOCRED, 0); 6855#endif 6856 freeblks->fb_chkcnt = -datablocks; 6857 UFS_LOCK(ump); 6858 fs->fs_pendingblocks += datablocks; 6859 UFS_UNLOCK(ump); 6860 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6861 /* 6862 * Push the zero'ed inode to its disk buffer so that we are free 6863 * to delete its dependencies below. Once the dependencies are gone 6864 * the buffer can be safely released. 6865 */ 6866 if ((error = bread(ip->i_devvp, 6867 fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6868 (int)fs->fs_bsize, NOCRED, &bp)) != 0) { 6869 brelse(bp); 6870 softdep_error("softdep_setup_freeblocks", error); 6871 } 6872 if (ump->um_fstype == UFS1) { 6873 dp1 = ((struct ufs1_dinode *)bp->b_data + 6874 ino_to_fsbo(fs, ip->i_number)); 6875 ip->i_din1->di_freelink = dp1->di_freelink; 6876 *dp1 = *ip->i_din1; 6877 } else { 6878 dp2 = ((struct ufs2_dinode *)bp->b_data + 6879 ino_to_fsbo(fs, ip->i_number)); 6880 ip->i_din2->di_freelink = dp2->di_freelink; 6881 *dp2 = *ip->i_din2; 6882 } 6883 /* 6884 * Find and eliminate any inode dependencies. 6885 */ 6886 ACQUIRE_LOCK(ump); 6887 (void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep); 6888 if ((inodedep->id_state & IOSTARTED) != 0) 6889 panic("softdep_setup_freeblocks: inode busy"); 6890 /* 6891 * Add the freeblks structure to the list of operations that 6892 * must await the zero'ed inode being written to disk. If we 6893 * still have a bitmap dependency (delay == 0), then the inode 6894 * has never been written to disk, so we can process the 6895 * freeblks below once we have deleted the dependencies.
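 *
 * (delay != 0 means DEPCOMPLETE is already set: the inode bitmap has
 * been written, so the frees must wait on the buffer holding the
 * zeroed inode; with delay == 0 the on-disk inode was never
 * initialized and the frees may proceed at once.)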
6896 */ 6897 delay = (inodedep->id_state & DEPCOMPLETE); 6898 if (delay) 6899 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6900 else 6901 freeblks->fb_state |= COMPLETE; 6902 /* 6903 * Because the file length has been truncated to zero, any 6904 * pending block allocation dependency structures associated 6905 * with this inode are obsolete and can simply be de-allocated. 6906 * We must first merge the two dependency lists to get rid of 6907 * any duplicate freefrag structures, then purge the merged list. 6908 * If we still have a bitmap dependency, then the inode has never 6909 * been written to disk, so we can free any fragments without delay. 6910 */ 6911 if (flags & IO_NORMAL) { 6912 merge_inode_lists(&inodedep->id_newinoupdt, 6913 &inodedep->id_inoupdt); 6914 while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0) 6915 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6916 freeblks); 6917 } 6918 if (flags & IO_EXT) { 6919 merge_inode_lists(&inodedep->id_newextupdt, 6920 &inodedep->id_extupdt); 6921 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6922 cancel_allocdirect(&inodedep->id_extupdt, adp, 6923 freeblks); 6924 } 6925 FREE_LOCK(ump); 6926 bdwrite(bp); 6927 trunc_dependencies(ip, freeblks, -1, 0, flags); 6928 ACQUIRE_LOCK(ump); 6929 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) 6930 (void) free_inodedep(inodedep); 6931 freeblks->fb_state |= DEPCOMPLETE; 6932 /* 6933 * If the inode with zeroed block pointers is now on disk 6934 * we can start freeing blocks. 6935 */ 6936 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 6937 freeblks->fb_state |= INPROGRESS; 6938 else 6939 freeblks = NULL; 6940 FREE_LOCK(ump); 6941 if (freeblks) 6942 handle_workitem_freeblocks(freeblks, 0); 6943 trunc_pages(ip, length, extblocks, flags); 6944} 6945 6946/* 6947 * Eliminate pages from the page cache that back parts of this inode and 6948 * adjust the vnode pager's idea of our size. This prevents stale data 6949 * from hanging around in the page cache. 6950 */ 6951static void 6952trunc_pages(ip, length, extblocks, flags) 6953 struct inode *ip; 6954 off_t length; 6955 ufs2_daddr_t extblocks; 6956 int flags; 6957{ 6958 struct vnode *vp; 6959 struct fs *fs; 6960 ufs_lbn_t lbn; 6961 off_t end, extend; 6962 6963 vp = ITOV(ip); 6964 fs = ip->i_fs; 6965 extend = OFF_TO_IDX(lblktosize(fs, -extblocks)); 6966 if ((flags & IO_EXT) != 0) 6967 vn_pages_remove(vp, extend, 0); 6968 if ((flags & IO_NORMAL) == 0) 6969 return; 6970 BO_LOCK(&vp->v_bufobj); 6971 drain_output(vp); 6972 BO_UNLOCK(&vp->v_bufobj); 6973 /* 6974 * The vnode pager eliminates file pages; we eliminate indirects 6975 * below. 6976 */ 6977 vnode_pager_setsize(vp, length); 6978 /* 6979 * Calculate the end based on the last indirect we want to keep. If 6980 * the block extends into indirects we can just use the negative of 6981 * its lbn. Doubles and triples exist at lower numbers so we must 6982 * be careful not to remove those, if they exist. Double and triple 6983 * indirect lbns do not overlap with others so it is not important 6984 * to verify how many levels are required. 6985 */ 6986 lbn = lblkno(fs, length); 6987 if (lbn >= NDADDR) { 6988 /* Calculate the virtual lbn of the triple indirect. */ 6989 lbn = -lbn - (NIADDR - 1); 6990 end = OFF_TO_IDX(lblktosize(fs, lbn)); 6991 } else 6992 end = extend; 6993 vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end); 6994} 6995 6996/* 6997 * See if the buf bp is in the range eliminated by truncation.
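 *
 * Returns 1 if the buffer should be discarded and 0 if it is to be
 * kept. For the partially truncated final block, *blkoffp is set to
 * the number of bytes that remain valid so the caller can shrink the
 * buffer with allocbuf() rather than discarding it.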
6998 */ 6999static int 7000trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags) 7001 struct buf *bp; 7002 int *blkoffp; 7003 ufs_lbn_t lastlbn; 7004 int lastoff; 7005 int flags; 7006{ 7007 ufs_lbn_t lbn; 7008 7009 *blkoffp = 0; 7010 /* Only match ext/normal blocks as appropriate. */ 7011 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 7012 ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0)) 7013 return (0); 7014 /* ALTDATA is always a full truncation. */ 7015 if ((bp->b_xflags & BX_ALTDATA) != 0) 7016 return (1); 7017 /* -1 is full truncation. */ 7018 if (lastlbn == -1) 7019 return (1); 7020 /* 7021 * If this is a partial truncate we only want those 7022 * blocks and indirect blocks that cover the range 7023 * we're after. 7024 */ 7025 lbn = bp->b_lblkno; 7026 if (lbn < 0) 7027 lbn = -(lbn + lbn_level(lbn)); 7028 if (lbn < lastlbn) 7029 return (0); 7030 /* Here we only truncate lblkno if it's partial. */ 7031 if (lbn == lastlbn) { 7032 if (lastoff == 0) 7033 return (0); 7034 *blkoffp = lastoff; 7035 } 7036 return (1); 7037} 7038 7039/* 7040 * Eliminate any dependencies that exist in memory beyond lblkno:off 7041 */ 7042static void 7043trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags) 7044 struct inode *ip; 7045 struct freeblks *freeblks; 7046 ufs_lbn_t lastlbn; 7047 int lastoff; 7048 int flags; 7049{ 7050 struct bufobj *bo; 7051 struct vnode *vp; 7052 struct buf *bp; 7053 int blkoff; 7054 7055 /* 7056 * We must wait for any I/O in progress to finish so that 7057 * all potential buffers on the dirty list will be visible. 7058 * Once they are all there, walk the list and get rid of 7059 * any dependencies. 7060 */ 7061 vp = ITOV(ip); 7062 bo = &vp->v_bufobj; 7063 BO_LOCK(bo); 7064 drain_output(vp); 7065 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 7066 bp->b_vflags &= ~BV_SCANNED; 7067restart: 7068 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 7069 if (bp->b_vflags & BV_SCANNED) 7070 continue; 7071 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 7072 bp->b_vflags |= BV_SCANNED; 7073 continue; 7074 } 7075 KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer")); 7076 if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL) 7077 goto restart; 7078 BO_UNLOCK(bo); 7079 if (deallocate_dependencies(bp, freeblks, blkoff)) 7080 bqrelse(bp); 7081 else 7082 brelse(bp); 7083 BO_LOCK(bo); 7084 goto restart; 7085 } 7086 /* 7087 * Now do the work of vtruncbuf while also matching indirect blocks. 
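 *
 * Clean buffers past the cut are either shrunk in place with
 * allocbuf() (a partially kept block) or invalidated and released
 * outright; indirect blocks are matched through their negative
 * lblkno, just as in the dirty scan above.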
7088 */ 7089 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) 7090 bp->b_vflags &= ~BV_SCANNED; 7091cleanrestart: 7092 TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) { 7093 if (bp->b_vflags & BV_SCANNED) 7094 continue; 7095 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 7096 bp->b_vflags |= BV_SCANNED; 7097 continue; 7098 } 7099 if (BUF_LOCK(bp, 7100 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, 7101 BO_LOCKPTR(bo)) == ENOLCK) { 7102 BO_LOCK(bo); 7103 goto cleanrestart; 7104 } 7105 bp->b_vflags |= BV_SCANNED; 7106 bremfree(bp); 7107 if (blkoff != 0) { 7108 allocbuf(bp, blkoff); 7109 bqrelse(bp); 7110 } else { 7111 bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF; 7112 brelse(bp); 7113 } 7114 BO_LOCK(bo); 7115 goto cleanrestart; 7116 } 7117 drain_output(vp); 7118 BO_UNLOCK(bo); 7119} 7120 7121static int 7122cancel_pagedep(pagedep, freeblks, blkoff) 7123 struct pagedep *pagedep; 7124 struct freeblks *freeblks; 7125 int blkoff; 7126{ 7127 struct jremref *jremref; 7128 struct jmvref *jmvref; 7129 struct dirrem *dirrem, *tmp; 7130 int i; 7131 7132 /* 7133 * Copy any directory remove dependencies to the list 7134 * to be processed after the freeblks proceeds. If 7135 * the directory entries never made it to disk they 7136 * can be dumped directly onto the work list. 7137 */ 7138 LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) { 7139 /* Skip this directory removal if it is intended to remain. */ 7140 if (dirrem->dm_offset < blkoff) 7141 continue; 7142 /* 7143 * If there are any dirrems we wait for the journal write 7144 * to complete and then restart the buf scan as the lock 7145 * has been dropped. 7146 */ 7147 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) { 7148 jwait(&jremref->jr_list, MNT_WAIT); 7149 return (ERESTART); 7150 } 7151 LIST_REMOVE(dirrem, dm_next); 7152 dirrem->dm_dirinum = pagedep->pd_ino; 7153 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list); 7154 } 7155 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) { 7156 jwait(&jmvref->jm_list, MNT_WAIT); 7157 return (ERESTART); 7158 } 7159 /* 7160 * When we're partially truncating a pagedep we just want to flush 7161 * journal entries and return. There cannot be any adds in the 7162 * truncated portion of the directory and newblk must remain if 7163 * part of the block remains. 7164 */ 7165 if (blkoff != 0) { 7166 struct diradd *dap; 7167 7168 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 7169 if (dap->da_offset > blkoff) 7170 panic("cancel_pagedep: diradd %p off %d > %d", 7171 dap, dap->da_offset, blkoff); 7172 for (i = 0; i < DAHASHSZ; i++) 7173 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) 7174 if (dap->da_offset > blkoff) 7175 panic("cancel_pagedep: diradd %p off %d > %d", 7176 dap, dap->da_offset, blkoff); 7177 return (0); 7178 } 7179 /* 7180 * There should be no directory add dependencies present 7181 * as the directory could not be truncated until all 7182 * children were removed.
7183 */ 7184 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, 7185 ("deallocate_dependencies: pendinghd != NULL")); 7186 for (i = 0; i < DAHASHSZ; i++) 7187 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, 7188 ("deallocate_dependencies: diraddhd != NULL")); 7189 if ((pagedep->pd_state & NEWBLOCK) != 0) 7190 free_newdirblk(pagedep->pd_newdirblk); 7191 if (free_pagedep(pagedep) == 0) 7192 panic("Failed to free pagedep %p", pagedep); 7193 return (0); 7194} 7195 7196/* 7197 * Reclaim any dependency structures from a buffer that is about to 7198 * be reallocated to a new vnode. The buffer must be locked, thus, 7199 * no I/O completion operations can occur while we are manipulating 7200 * its associated dependencies. The mutex is held so that other I/O's 7201 * associated with related dependencies do not occur. 7202 */ 7203static int 7204deallocate_dependencies(bp, freeblks, off) 7205 struct buf *bp; 7206 struct freeblks *freeblks; 7207 int off; 7208{ 7209 struct indirdep *indirdep; 7210 struct pagedep *pagedep; 7211 struct allocdirect *adp; 7212 struct worklist *wk, *wkn; 7213 struct ufsmount *ump; 7214 7215 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 7216 goto done; 7217 ump = VFSTOUFS(wk->wk_mp); 7218 ACQUIRE_LOCK(ump); 7219 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { 7220 switch (wk->wk_type) { 7221 case D_INDIRDEP: 7222 indirdep = WK_INDIRDEP(wk); 7223 if (bp->b_lblkno >= 0 || 7224 bp->b_blkno != indirdep->ir_savebp->b_lblkno) 7225 panic("deallocate_dependencies: not indir"); 7226 cancel_indirdep(indirdep, bp, freeblks); 7227 continue; 7228 7229 case D_PAGEDEP: 7230 pagedep = WK_PAGEDEP(wk); 7231 if (cancel_pagedep(pagedep, freeblks, off)) { 7232 FREE_LOCK(ump); 7233 return (ERESTART); 7234 } 7235 continue; 7236 7237 case D_ALLOCINDIR: 7238 /* 7239 * Simply remove the allocindir, we'll find it via 7240 * the indirdep where we can clear pointers if 7241 * needed. 7242 */ 7243 WORKLIST_REMOVE(wk); 7244 continue; 7245 7246 case D_FREEWORK: 7247 /* 7248 * A truncation is waiting for the zero'd pointers 7249 * to be written. It can be freed when the freeblks 7250 * is journaled. 7251 */ 7252 WORKLIST_REMOVE(wk); 7253 wk->wk_state |= ONDEPLIST; 7254 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 7255 break; 7256 7257 case D_ALLOCDIRECT: 7258 adp = WK_ALLOCDIRECT(wk); 7259 if (off != 0) 7260 continue; 7261 /* FALLTHROUGH */ 7262 default: 7263 panic("deallocate_dependencies: Unexpected type %s", 7264 TYPENAME(wk->wk_type)); 7265 /* NOTREACHED */ 7266 } 7267 } 7268 FREE_LOCK(ump); 7269done: 7270 /* 7271 * Don't throw away this buf, we were partially truncating and 7272 * some deps may always remain. 7273 */ 7274 if (off) { 7275 allocbuf(bp, off); 7276 bp->b_vflags |= BV_SCANNED; 7277 return (EBUSY); 7278 } 7279 bp->b_flags |= B_INVAL | B_NOCACHE; 7280 7281 return (0); 7282} 7283 7284/* 7285 * An allocdirect is being canceled due to a truncate. We must make sure 7286 * the journal entry is released in concert with the blkfree that releases 7287 * the storage. Completed journal entries must not be released until the 7288 * space is no longer pointed to by the inode or in the bitmap. 
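 *
 * Each canceled allocdirect is matched, by block number, with the
 * freework queued for the same block when the freeblks was set up;
 * the journal dependency is then handed off to that freework so it
 * is resolved together with the eventual ffs_blkfree().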
7289 */ 7290static void 7291cancel_allocdirect(adphead, adp, freeblks) 7292 struct allocdirectlst *adphead; 7293 struct allocdirect *adp; 7294 struct freeblks *freeblks; 7295{ 7296 struct freework *freework; 7297 struct newblk *newblk; 7298 struct worklist *wk; 7299 7300 TAILQ_REMOVE(adphead, adp, ad_next); 7301 newblk = (struct newblk *)adp; 7302 freework = NULL; 7303 /* 7304 * Find the correct freework structure. 7305 */ 7306 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { 7307 if (wk->wk_type != D_FREEWORK) 7308 continue; 7309 freework = WK_FREEWORK(wk); 7310 if (freework->fw_blkno == newblk->nb_newblkno) 7311 break; 7312 } 7313 if (freework == NULL) 7314 panic("cancel_allocdirect: Freework not found"); 7315 /* 7316 * If a newblk exists at all we still have the journal entry that 7317 * initiated the allocation so we do not need to journal the free. 7318 */ 7319 cancel_jfreeblk(freeblks, freework->fw_blkno); 7320 /* 7321 * If the journal hasn't been written the jnewblk must be passed 7322 * to the call to ffs_blkfree that reclaims the space. We accomplish 7323 * this by linking the journal dependency into the freework to be 7324 * freed when freework_freeblock() is called. If the journal has 7325 * been written we can simply reclaim the journal space when the 7326 * freeblks work is complete. 7327 */ 7328 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, 7329 &freeblks->fb_jwork); 7330 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 7331} 7332 7333 7334/* 7335 * Cancel a new block allocation. May be an indirect or direct block. We 7336 * remove it from various lists and return any journal record that needs to 7337 * be resolved by the caller. 7338 * 7339 * A special consideration is made for indirects which were never pointed 7340 * at on disk and will never be found once this block is released. 7341 */ 7342static struct jnewblk * 7343cancel_newblk(newblk, wk, wkhd) 7344 struct newblk *newblk; 7345 struct worklist *wk; 7346 struct workhead *wkhd; 7347{ 7348 struct jnewblk *jnewblk; 7349 7350 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); 7351 7352 newblk->nb_state |= GOINGAWAY; 7353 /* 7354 * Previously we traversed the completedhd on each indirdep 7355 * attached to this newblk to cancel them and gather journal 7356 * work. Since we need only the oldest journal segment and 7357 * the lowest point on the tree will always have the oldest 7358 * journal segment we are free to release the segments 7359 * of any subordinates and may leave the indirdep list to 7360 * indirdep_complete() when this newblk is freed. 7361 */ 7362 if (newblk->nb_state & ONDEPLIST) { 7363 newblk->nb_state &= ~ONDEPLIST; 7364 LIST_REMOVE(newblk, nb_deps); 7365 } 7366 if (newblk->nb_state & ONWORKLIST) 7367 WORKLIST_REMOVE(&newblk->nb_list); 7368 /* 7369 * If the journal entry hasn't been written we save a pointer to 7370 * the dependency that frees it until it is written or the 7371 * superseding operation completes. 7372 */ 7373 jnewblk = newblk->nb_jnewblk; 7374 if (jnewblk != NULL && wk != NULL) { 7375 newblk->nb_jnewblk = NULL; 7376 jnewblk->jn_dep = wk; 7377 } 7378 if (!LIST_EMPTY(&newblk->nb_jwork)) 7379 jwork_move(wkhd, &newblk->nb_jwork); 7380 /* 7381 * When truncating we must free the newdirblk early to remove 7382 * the pagedep from the hash before returning. 
7383 */ 7384 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7385 free_newdirblk(WK_NEWDIRBLK(wk)); 7386 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7387 panic("cancel_newblk: extra newdirblk"); 7388 7389 return (jnewblk); 7390} 7391 7392/* 7393 * Schedule the freefrag associated with a newblk to be released once 7394 * the pointers are written and the previous block is no longer needed. 7395 */ 7396static void 7397newblk_freefrag(newblk) 7398 struct newblk *newblk; 7399{ 7400 struct freefrag *freefrag; 7401 7402 if (newblk->nb_freefrag == NULL) 7403 return; 7404 freefrag = newblk->nb_freefrag; 7405 newblk->nb_freefrag = NULL; 7406 freefrag->ff_state |= COMPLETE; 7407 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 7408 add_to_worklist(&freefrag->ff_list, 0); 7409} 7410 7411/* 7412 * Free a newblk. Generate a new freefrag work request if appropriate. 7413 * This must be called after the inode pointer and any direct block pointers 7414 * are valid or fully removed via truncate or frag extension. 7415 */ 7416static void 7417free_newblk(newblk) 7418 struct newblk *newblk; 7419{ 7420 struct indirdep *indirdep; 7421 struct worklist *wk; 7422 7423 KASSERT(newblk->nb_jnewblk == NULL, 7424 ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk)); 7425 KASSERT(newblk->nb_list.wk_type != D_NEWBLK, 7426 ("free_newblk: unclaimed newblk")); 7427 LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp)); 7428 newblk_freefrag(newblk); 7429 if (newblk->nb_state & ONDEPLIST) 7430 LIST_REMOVE(newblk, nb_deps); 7431 if (newblk->nb_state & ONWORKLIST) 7432 WORKLIST_REMOVE(&newblk->nb_list); 7433 LIST_REMOVE(newblk, nb_hash); 7434 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7435 free_newdirblk(WK_NEWDIRBLK(wk)); 7436 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7437 panic("free_newblk: extra newdirblk"); 7438 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) 7439 indirdep_complete(indirdep); 7440 handle_jwork(&newblk->nb_jwork); 7441 WORKITEM_FREE(newblk, D_NEWBLK); 7442} 7443 7444/* 7445 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 7446 * This routine must be called with splbio interrupts blocked. 7447 */ 7448static void 7449free_newdirblk(newdirblk) 7450 struct newdirblk *newdirblk; 7451{ 7452 struct pagedep *pagedep; 7453 struct diradd *dap; 7454 struct worklist *wk; 7455 7456 LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp)); 7457 WORKLIST_REMOVE(&newdirblk->db_list); 7458 /* 7459 * If the pagedep is still linked onto the directory buffer 7460 * dependency chain, then some of the entries on the 7461 * pd_pendinghd list may not be committed to disk yet. In 7462 * this case, we will simply clear the NEWBLOCK flag and 7463 * let the pd_pendinghd list be processed when the pagedep 7464 * is next written. If the pagedep is no longer on the buffer 7465 * dependency chain, then all the entries on the pd_pending 7466 * list are committed to disk and we can free them here. 7467 */ 7468 pagedep = newdirblk->db_pagedep; 7469 pagedep->pd_state &= ~NEWBLOCK; 7470 if ((pagedep->pd_state & ONWORKLIST) == 0) { 7471 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 7472 free_diradd(dap, NULL); 7473 /* 7474 * If no dependencies remain, the pagedep will be freed. 7475 */ 7476 free_pagedep(pagedep); 7477 } 7478 /* Should only ever be one item in the list. 
*/
7479 while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7480 WORKLIST_REMOVE(wk);
7481 handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7482 }
7483 WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7484}
7485
7486/*
7487 * Prepare an inode to be freed. The actual free operation is not
7488 * done until the zero'ed inode has been written to disk.
7489 */
7490void
7491softdep_freefile(pvp, ino, mode)
7492 struct vnode *pvp;
7493 ino_t ino;
7494 int mode;
7495{
7496 struct inode *ip = VTOI(pvp);
7497 struct inodedep *inodedep;
7498 struct freefile *freefile;
7499 struct freeblks *freeblks;
7500 struct ufsmount *ump;
7501
7502 ump = ip->i_ump;
7503 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7504 ("softdep_freefile called on non-softdep filesystem"));
7505 /*
7506 * This sets up the inode de-allocation dependency.
7507 */
7508 freefile = malloc(sizeof(struct freefile),
7509 M_FREEFILE, M_SOFTDEP_FLAGS);
7510 workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7511 freefile->fx_mode = mode;
7512 freefile->fx_oldinum = ino;
7513 freefile->fx_devvp = ip->i_devvp;
7514 LIST_INIT(&freefile->fx_jwork);
7515 UFS_LOCK(ump);
7516 ip->i_fs->fs_pendinginodes += 1;
7517 UFS_UNLOCK(ump);
7518
7519 /*
7520 * If the inodedep does not exist, then the zero'ed inode has
7521 * been written to disk. If the allocated inode has never been
7522 * written to disk, then the on-disk inode is zero'ed. In either
7523 * case we can free the file immediately. If the journal was
7524 * canceled before being written the inode will never make it to
7525 * disk and we must send the canceled journal entries to
7526 * ffs_freefile() to be cleared in conjunction with the bitmap.
7527 * Any blocks waiting on the inode to write can be safely freed
7528 * here as it will never be written.
7529 */
7530 ACQUIRE_LOCK(ump);
7531 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7532 if (inodedep) {
7533 /*
7534 * Clear out freeblks that no longer need to reference
7535 * this inode.
7536 */
7537 while ((freeblks =
7538 TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7539 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7540 fb_next);
7541 freeblks->fb_state &= ~ONDEPLIST;
7542 }
7543 /*
7544 * Remove this inode from the unlinked list.
7545 */
7546 if (inodedep->id_state & UNLINKED) {
7547 /*
7548 * Save the journal work to be freed with the bitmap
7549 * before we clear UNLINKED. Otherwise it can be lost
7550 * if the inode block is written.
7551 */
7552 handle_bufwait(inodedep, &freefile->fx_jwork);
7553 clear_unlinked_inodedep(inodedep);
7554 /*
7555 * Re-acquire inodedep as we've dropped the
7556 * per-filesystem lock in clear_unlinked_inodedep().
7557 */
7558 inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7559 }
7560 }
7561 if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7562 FREE_LOCK(ump);
7563 handle_workitem_freefile(freefile);
7564 return;
7565 }
7566 if ((inodedep->id_state & DEPCOMPLETE) == 0)
7567 inodedep->id_state |= GOINGAWAY;
7568 WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7569 FREE_LOCK(ump);
7570 if (ip->i_number == ino)
7571 ip->i_flag |= IN_MODIFIED;
7572}
7573
7574/*
7575 * Check to see if an inode has never been written to disk. If
7576 * so, free the inodedep and return success, otherwise return failure.
7577 * This routine must be called with splbio interrupts blocked.
7578 *
7579 * If we still have a bitmap dependency, then the inode has never
7580 * been written to disk. Drop the dependency as it is no longer
7581 * necessary since the inode is being deallocated.
We set the 7582 * ALLCOMPLETE flags since the bitmap now properly shows that the 7583 * inode is not allocated. Even if the inode is actively being 7584 * written, it has been rolled back to its zero'ed state, so we 7585 * are ensured that a zero inode is what is on the disk. For short 7586 * lived files, this change will usually result in removing all the 7587 * dependencies from the inode so that it can be freed immediately. 7588 */ 7589static int 7590check_inode_unwritten(inodedep) 7591 struct inodedep *inodedep; 7592{ 7593 7594 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7595 7596 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || 7597 !LIST_EMPTY(&inodedep->id_dirremhd) || 7598 !LIST_EMPTY(&inodedep->id_pendinghd) || 7599 !LIST_EMPTY(&inodedep->id_bufwait) || 7600 !LIST_EMPTY(&inodedep->id_inowait) || 7601 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7602 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7603 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7604 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7605 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7606 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7607 inodedep->id_mkdiradd != NULL || 7608 inodedep->id_nlinkdelta != 0) 7609 return (0); 7610 /* 7611 * Another process might be in initiate_write_inodeblock_ufs[12] 7612 * trying to allocate memory without holding "Softdep Lock". 7613 */ 7614 if ((inodedep->id_state & IOSTARTED) != 0 && 7615 inodedep->id_savedino1 == NULL) 7616 return (0); 7617 7618 if (inodedep->id_state & ONDEPLIST) 7619 LIST_REMOVE(inodedep, id_deps); 7620 inodedep->id_state &= ~ONDEPLIST; 7621 inodedep->id_state |= ALLCOMPLETE; 7622 inodedep->id_bmsafemap = NULL; 7623 if (inodedep->id_state & ONWORKLIST) 7624 WORKLIST_REMOVE(&inodedep->id_list); 7625 if (inodedep->id_savedino1 != NULL) { 7626 free(inodedep->id_savedino1, M_SAVEDINO); 7627 inodedep->id_savedino1 = NULL; 7628 } 7629 if (free_inodedep(inodedep) == 0) 7630 panic("check_inode_unwritten: busy inode"); 7631 return (1); 7632} 7633 7634static int 7635check_inodedep_free(inodedep) 7636 struct inodedep *inodedep; 7637{ 7638 7639 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7640 if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 7641 !LIST_EMPTY(&inodedep->id_dirremhd) || 7642 !LIST_EMPTY(&inodedep->id_pendinghd) || 7643 !LIST_EMPTY(&inodedep->id_bufwait) || 7644 !LIST_EMPTY(&inodedep->id_inowait) || 7645 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7646 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7647 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7648 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7649 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7650 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7651 inodedep->id_mkdiradd != NULL || 7652 inodedep->id_nlinkdelta != 0 || 7653 inodedep->id_savedino1 != NULL) 7654 return (0); 7655 return (1); 7656} 7657 7658/* 7659 * Try to free an inodedep structure. Return 1 if it could be freed. 7660 */ 7661static int 7662free_inodedep(inodedep) 7663 struct inodedep *inodedep; 7664{ 7665 7666 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7667 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || 7668 !check_inodedep_free(inodedep)) 7669 return (0); 7670 if (inodedep->id_state & ONDEPLIST) 7671 LIST_REMOVE(inodedep, id_deps); 7672 LIST_REMOVE(inodedep, id_hash); 7673 WORKITEM_FREE(inodedep, D_INODEDEP); 7674 return (1); 7675} 7676 7677/* 7678 * Free the block referenced by a freework structure. The parent freeblks 7679 * structure is released and completed when the final cg bitmap reaches 7680 * the disk. 
This routine may be freeing a jnewblk which never made it to 7681 * disk in which case we do not have to wait as the operation is undone 7682 * in memory immediately. 7683 */ 7684static void 7685freework_freeblock(freework) 7686 struct freework *freework; 7687{ 7688 struct freeblks *freeblks; 7689 struct jnewblk *jnewblk; 7690 struct ufsmount *ump; 7691 struct workhead wkhd; 7692 struct fs *fs; 7693 int bsize; 7694 int needj; 7695 7696 ump = VFSTOUFS(freework->fw_list.wk_mp); 7697 LOCK_OWNED(ump); 7698 /* 7699 * Handle partial truncate separately. 7700 */ 7701 if (freework->fw_indir) { 7702 complete_trunc_indir(freework); 7703 return; 7704 } 7705 freeblks = freework->fw_freeblks; 7706 fs = ump->um_fs; 7707 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; 7708 bsize = lfragtosize(fs, freework->fw_frags); 7709 LIST_INIT(&wkhd); 7710 /* 7711 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives 7712 * on the indirblk hashtable and prevents premature freeing. 7713 */ 7714 freework->fw_state |= DEPCOMPLETE; 7715 /* 7716 * SUJ needs to wait for the segment referencing freed indirect 7717 * blocks to expire so that we know the checker will not confuse 7718 * a re-allocated indirect block with its old contents. 7719 */ 7720 if (needj && freework->fw_lbn <= -NDADDR) 7721 indirblk_insert(freework); 7722 /* 7723 * If we are canceling an existing jnewblk pass it to the free 7724 * routine, otherwise pass the freeblk which will ultimately 7725 * release the freeblks. If we're not journaling, we can just 7726 * free the freeblks immediately. 7727 */ 7728 jnewblk = freework->fw_jnewblk; 7729 if (jnewblk != NULL) { 7730 cancel_jnewblk(jnewblk, &wkhd); 7731 needj = 0; 7732 } else if (needj) { 7733 freework->fw_state |= DELAYEDFREE; 7734 freeblks->fb_cgwait++; 7735 WORKLIST_INSERT(&wkhd, &freework->fw_list); 7736 } 7737 FREE_LOCK(ump); 7738 freeblks_free(ump, freeblks, btodb(bsize)); 7739 CTR4(KTR_SUJ, 7740 "freework_freeblock: ino %d blkno %jd lbn %jd size %ld", 7741 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); 7742 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, 7743 freeblks->fb_inum, freeblks->fb_vtype, &wkhd); 7744 ACQUIRE_LOCK(ump); 7745 /* 7746 * The jnewblk will be discarded and the bits in the map never 7747 * made it to disk. We can immediately free the freeblk. 7748 */ 7749 if (needj == 0) 7750 handle_written_freework(freework); 7751} 7752 7753/* 7754 * We enqueue freework items that need processing back on the freeblks and 7755 * add the freeblks to the worklist. This makes it easier to find all work 7756 * required to flush a truncation in process_truncates(). 7757 */ 7758static void 7759freework_enqueue(freework) 7760 struct freework *freework; 7761{ 7762 struct freeblks *freeblks; 7763 7764 freeblks = freework->fw_freeblks; 7765 if ((freework->fw_state & INPROGRESS) == 0) 7766 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 7767 if ((freeblks->fb_state & 7768 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && 7769 LIST_EMPTY(&freeblks->fb_jblkdephd)) 7770 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7771} 7772 7773/* 7774 * Start, continue, or finish the process of freeing an indirect block tree. 7775 * The free operation may be paused at any point with fw_off containing the 7776 * offset to restart from. This enables us to implement some flow control 7777 * for large truncates which may fan out and generate a huge number of 7778 * dependencies. 
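 *
 * Resume-point sketch (condensed from handle_workitem_indirblk() and
 * indir_trunc() below, not additional code): the lock is dropped
 * around the traversal and fw_off records how far bap[] has been
 * processed:
 *
 *	freework->fw_state |= INPROGRESS;
 *	FREE_LOCK(ump);
 *	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
 *	    freework->fw_lbn);	/* may pause; fw_off marks the restart slot */
 *	ACQUIRE_LOCK(ump);
 *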
7779 */ 7780static void 7781handle_workitem_indirblk(freework) 7782 struct freework *freework; 7783{ 7784 struct freeblks *freeblks; 7785 struct ufsmount *ump; 7786 struct fs *fs; 7787 7788 freeblks = freework->fw_freeblks; 7789 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7790 fs = ump->um_fs; 7791 if (freework->fw_state & DEPCOMPLETE) { 7792 handle_written_freework(freework); 7793 return; 7794 } 7795 if (freework->fw_off == NINDIR(fs)) { 7796 freework_freeblock(freework); 7797 return; 7798 } 7799 freework->fw_state |= INPROGRESS; 7800 FREE_LOCK(ump); 7801 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), 7802 freework->fw_lbn); 7803 ACQUIRE_LOCK(ump); 7804} 7805 7806/* 7807 * Called when a freework structure attached to a cg buf is written. The 7808 * ref on either the parent or the freeblks structure is released and 7809 * the freeblks is added back to the worklist if there is more work to do. 7810 */ 7811static void 7812handle_written_freework(freework) 7813 struct freework *freework; 7814{ 7815 struct freeblks *freeblks; 7816 struct freework *parent; 7817 7818 freeblks = freework->fw_freeblks; 7819 parent = freework->fw_parent; 7820 if (freework->fw_state & DELAYEDFREE) 7821 freeblks->fb_cgwait--; 7822 freework->fw_state |= COMPLETE; 7823 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 7824 WORKITEM_FREE(freework, D_FREEWORK); 7825 if (parent) { 7826 if (--parent->fw_ref == 0) 7827 freework_enqueue(parent); 7828 return; 7829 } 7830 if (--freeblks->fb_ref != 0) 7831 return; 7832 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == 7833 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) 7834 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7835} 7836 7837/* 7838 * This workitem routine performs the block de-allocation. 7839 * The workitem is added to the pending list after the updated 7840 * inode block has been written to disk. As mentioned above, 7841 * checks regarding the number of blocks de-allocated (compared 7842 * to the number of blocks allocated for the file) are also 7843 * performed in this function. 
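 *
 * Shape of the dispatch loop below, condensed for reference (each
 * case is handled in full in the function body):
 *
 *	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
 *		WORKLIST_REMOVE(wk);
 *		switch (wk->wk_type) {
 *		case D_DIRREM:		/* queue the link-count drop */
 *		case D_ALLOCDIRECT:	/* release the newblk */
 *		case D_ALLOCINDIR:	/* release, via a freework if needed */
 *		case D_FREEWORK:	/* free direct or indirect blocks */
 *			break;
 *		}
 *	}
 *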
7844 */ 7845static int 7846handle_workitem_freeblocks(freeblks, flags) 7847 struct freeblks *freeblks; 7848 int flags; 7849{ 7850 struct freework *freework; 7851 struct newblk *newblk; 7852 struct allocindir *aip; 7853 struct ufsmount *ump; 7854 struct worklist *wk; 7855 7856 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), 7857 ("handle_workitem_freeblocks: Journal entries not written.")); 7858 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7859 ACQUIRE_LOCK(ump); 7860 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { 7861 WORKLIST_REMOVE(wk); 7862 switch (wk->wk_type) { 7863 case D_DIRREM: 7864 wk->wk_state |= COMPLETE; 7865 add_to_worklist(wk, 0); 7866 continue; 7867 7868 case D_ALLOCDIRECT: 7869 free_newblk(WK_NEWBLK(wk)); 7870 continue; 7871 7872 case D_ALLOCINDIR: 7873 aip = WK_ALLOCINDIR(wk); 7874 freework = NULL; 7875 if (aip->ai_state & DELAYEDFREE) { 7876 FREE_LOCK(ump); 7877 freework = newfreework(ump, freeblks, NULL, 7878 aip->ai_lbn, aip->ai_newblkno, 7879 ump->um_fs->fs_frag, 0, 0); 7880 ACQUIRE_LOCK(ump); 7881 } 7882 newblk = WK_NEWBLK(wk); 7883 if (newblk->nb_jnewblk) { 7884 freework->fw_jnewblk = newblk->nb_jnewblk; 7885 newblk->nb_jnewblk->jn_dep = &freework->fw_list; 7886 newblk->nb_jnewblk = NULL; 7887 } 7888 free_newblk(newblk); 7889 continue; 7890 7891 case D_FREEWORK: 7892 freework = WK_FREEWORK(wk); 7893 if (freework->fw_lbn <= -NDADDR) 7894 handle_workitem_indirblk(freework); 7895 else 7896 freework_freeblock(freework); 7897 continue; 7898 default: 7899 panic("handle_workitem_freeblocks: Unknown type %s", 7900 TYPENAME(wk->wk_type)); 7901 } 7902 } 7903 if (freeblks->fb_ref != 0) { 7904 freeblks->fb_state &= ~INPROGRESS; 7905 wake_worklist(&freeblks->fb_list); 7906 freeblks = NULL; 7907 } 7908 FREE_LOCK(ump); 7909 if (freeblks) 7910 return handle_complete_freeblocks(freeblks, flags); 7911 return (0); 7912} 7913 7914/* 7915 * Handle completion of block free via truncate. This allows fs_pending 7916 * to track the actual free block count more closely than if we only updated 7917 * it at the end. We must be careful to handle cases where the block count 7918 * on free was incorrect. 7919 */ 7920static void 7921freeblks_free(ump, freeblks, blocks) 7922 struct ufsmount *ump; 7923 struct freeblks *freeblks; 7924 int blocks; 7925{ 7926 struct fs *fs; 7927 ufs2_daddr_t remain; 7928 7929 UFS_LOCK(ump); 7930 remain = -freeblks->fb_chkcnt; 7931 freeblks->fb_chkcnt += blocks; 7932 if (remain > 0) { 7933 if (remain < blocks) 7934 blocks = remain; 7935 fs = ump->um_fs; 7936 fs->fs_pendingblocks -= blocks; 7937 } 7938 UFS_UNLOCK(ump); 7939} 7940 7941/* 7942 * Once all of the freework workitems are complete we can retire the 7943 * freeblocks dependency and any journal work awaiting completion. This 7944 * can not be called until all other dependencies are stable on disk. 7945 */ 7946static int 7947handle_complete_freeblocks(freeblks, flags) 7948 struct freeblks *freeblks; 7949 int flags; 7950{ 7951 struct inodedep *inodedep; 7952 struct inode *ip; 7953 struct vnode *vp; 7954 struct fs *fs; 7955 struct ufsmount *ump; 7956 ufs2_daddr_t spare; 7957 7958 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7959 fs = ump->um_fs; 7960 flags = LK_EXCLUSIVE | flags; 7961 spare = freeblks->fb_chkcnt; 7962 7963 /* 7964 * If we did not release the expected number of blocks we may have 7965 * to adjust the inode block count here. Only do so if it wasn't 7966 * a truncation to zero and the modrev still matches. 
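 *
 * Worked example (illustrative numbers, following the arithmetic in
 * freeblks_free() above, where fb_chkcnt climbs back toward zero as
 * blocks are returned): if the truncate expected 8 blocks but the
 * passes above returned 10, spare ends up +2 and i_blocks must
 * shrink by the 2 extra; a negative spare instead repays
 * fs_pendingblocks below.
 *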
7967 */ 7968 if (spare && freeblks->fb_len != 0) { 7969 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7970 flags, &vp, FFSV_FORCEINSMQ) != 0) 7971 return (EBUSY); 7972 ip = VTOI(vp); 7973 if (DIP(ip, i_modrev) == freeblks->fb_modrev) { 7974 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); 7975 ip->i_flag |= IN_CHANGE; 7976 /* 7977 * We must wait so this happens before the 7978 * journal is reclaimed. 7979 */ 7980 ffs_update(vp, 1); 7981 } 7982 vput(vp); 7983 } 7984 if (spare < 0) { 7985 UFS_LOCK(ump); 7986 fs->fs_pendingblocks += spare; 7987 UFS_UNLOCK(ump); 7988 } 7989#ifdef QUOTA 7990 /* Handle spare. */ 7991 if (spare) 7992 quotaadj(freeblks->fb_quota, ump, -spare); 7993 quotarele(freeblks->fb_quota); 7994#endif 7995 ACQUIRE_LOCK(ump); 7996 if (freeblks->fb_state & ONDEPLIST) { 7997 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7998 0, &inodedep); 7999 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); 8000 freeblks->fb_state &= ~ONDEPLIST; 8001 if (TAILQ_EMPTY(&inodedep->id_freeblklst)) 8002 free_inodedep(inodedep); 8003 } 8004 /* 8005 * All of the freeblock deps must be complete prior to this call 8006 * so it's now safe to complete earlier outstanding journal entries. 8007 */ 8008 handle_jwork(&freeblks->fb_jwork); 8009 WORKITEM_FREE(freeblks, D_FREEBLKS); 8010 FREE_LOCK(ump); 8011 return (0); 8012} 8013 8014/* 8015 * Release blocks associated with the freeblks and stored in the indirect 8016 * block dbn. If level is greater than SINGLE, the block is an indirect block 8017 * and recursive calls to indirtrunc must be used to cleanse other indirect 8018 * blocks. 8019 * 8020 * This handles partial and complete truncation of blocks. Partial is noted 8021 * with goingaway == 0. In this case the freework is completed after the 8022 * zero'd indirects are written to disk. For full truncation the freework 8023 * is completed after the block is freed. 8024 */ 8025static void 8026indir_trunc(freework, dbn, lbn) 8027 struct freework *freework; 8028 ufs2_daddr_t dbn; 8029 ufs_lbn_t lbn; 8030{ 8031 struct freework *nfreework; 8032 struct workhead wkhd; 8033 struct freeblks *freeblks; 8034 struct buf *bp; 8035 struct fs *fs; 8036 struct indirdep *indirdep; 8037 struct ufsmount *ump; 8038 ufs1_daddr_t *bap1 = 0; 8039 ufs2_daddr_t nb, nnb, *bap2 = 0; 8040 ufs_lbn_t lbnadd, nlbn; 8041 int i, nblocks, ufs1fmt; 8042 int freedblocks; 8043 int goingaway; 8044 int freedeps; 8045 int needj; 8046 int level; 8047 int cnt; 8048 8049 freeblks = freework->fw_freeblks; 8050 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 8051 fs = ump->um_fs; 8052 /* 8053 * Get buffer of block pointers to be freed. There are three cases: 8054 * 8055 * 1) Partial truncate caches the indirdep pointer in the freework 8056 * which provides us a back copy to the save bp which holds the 8057 * pointers we want to clear. When this completes the zero 8058 * pointers are written to the real copy. 8059 * 2) The indirect is being completely truncated, cancel_indirdep() 8060 * eliminated the real copy and placed the indirdep on the saved 8061 * copy. The indirdep and buf are discarded when this completes. 8062 * 3) The indirect was not in memory, we read a copy off of the disk 8063 * using the devvp and drop and invalidate the buffer when we're 8064 * done. 
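 *
 * The three cases condensed (a sketch of the code that follows;
 * devvp abbreviates freeblks->fb_devvp):
 *
 *	if (freework->fw_indir != NULL)
 *		bp = freework->fw_indir->ir_savebp;		/* case 1 */
 *	else if ((bp = incore(&devvp->v_bufobj, dbn)) != NULL)
 *		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));	/* case 2 */
 *	else
 *		bread(devvp, dbn, (int)fs->fs_bsize, NOCRED, &bp); /* case 3 */
 *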
8065 */ 8066 goingaway = 1; 8067 indirdep = NULL; 8068 if (freework->fw_indir != NULL) { 8069 goingaway = 0; 8070 indirdep = freework->fw_indir; 8071 bp = indirdep->ir_savebp; 8072 if (bp == NULL || bp->b_blkno != dbn) 8073 panic("indir_trunc: Bad saved buf %p blkno %jd", 8074 bp, (intmax_t)dbn); 8075 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { 8076 /* 8077 * The lock prevents the buf dep list from changing and 8078 * indirects on devvp should only ever have one dependency. 8079 */ 8080 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); 8081 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) 8082 panic("indir_trunc: Bad indirdep %p from buf %p", 8083 indirdep, bp); 8084 } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 8085 NOCRED, &bp) != 0) { 8086 brelse(bp); 8087 return; 8088 } 8089 ACQUIRE_LOCK(ump); 8090 /* Protects against a race with complete_trunc_indir(). */ 8091 freework->fw_state &= ~INPROGRESS; 8092 /* 8093 * If we have an indirdep we need to enforce the truncation order 8094 * and discard it when it is complete. 8095 */ 8096 if (indirdep) { 8097 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && 8098 !TAILQ_EMPTY(&indirdep->ir_trunc)) { 8099 /* 8100 * Add the complete truncate to the list on the 8101 * indirdep to enforce in-order processing. 8102 */ 8103 if (freework->fw_indir == NULL) 8104 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, 8105 freework, fw_next); 8106 FREE_LOCK(ump); 8107 return; 8108 } 8109 /* 8110 * If we're goingaway, free the indirdep. Otherwise it will 8111 * linger until the write completes. 8112 */ 8113 if (goingaway) 8114 free_indirdep(indirdep); 8115 } 8116 FREE_LOCK(ump); 8117 /* Initialize pointers depending on block size. */ 8118 if (ump->um_fstype == UFS1) { 8119 bap1 = (ufs1_daddr_t *)bp->b_data; 8120 nb = bap1[freework->fw_off]; 8121 ufs1fmt = 1; 8122 } else { 8123 bap2 = (ufs2_daddr_t *)bp->b_data; 8124 nb = bap2[freework->fw_off]; 8125 ufs1fmt = 0; 8126 } 8127 level = lbn_level(lbn); 8128 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; 8129 lbnadd = lbn_offset(fs, level); 8130 nblocks = btodb(fs->fs_bsize); 8131 nfreework = freework; 8132 freedeps = 0; 8133 cnt = 0; 8134 /* 8135 * Reclaim blocks. Traverses into nested indirect levels and 8136 * arranges for the current level to be freed when subordinates 8137 * are free when journaling. 8138 */ 8139 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { 8140 if (i != NINDIR(fs) - 1) { 8141 if (ufs1fmt) 8142 nnb = bap1[i+1]; 8143 else 8144 nnb = bap2[i+1]; 8145 } else 8146 nnb = 0; 8147 if (nb == 0) 8148 continue; 8149 cnt++; 8150 if (level != 0) { 8151 nlbn = (lbn + 1) - (i * lbnadd); 8152 if (needj != 0) { 8153 nfreework = newfreework(ump, freeblks, freework, 8154 nlbn, nb, fs->fs_frag, 0, 0); 8155 freedeps++; 8156 } 8157 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); 8158 } else { 8159 struct freedep *freedep; 8160 8161 /* 8162 * Attempt to aggregate freedep dependencies for 8163 * all blocks being released to the same CG. 
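 *
 * Example (illustrative block numbers): if bap[] holds 100, 104 and
 * 108 and all three map to one cylinder group, only the
 * ffs_blkfree() for 108 carries a freedep; the
 * dtog(fs, nb) != dtog(fs, nnb) test below marks the last block of
 * each per-cg run.
 *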
8164 */ 8165 LIST_INIT(&wkhd); 8166 if (needj != 0 && 8167 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { 8168 freedep = newfreedep(freework); 8169 WORKLIST_INSERT_UNLOCKED(&wkhd, 8170 &freedep->fd_list); 8171 freedeps++; 8172 } 8173 CTR3(KTR_SUJ, 8174 "indir_trunc: ino %d blkno %jd size %ld", 8175 freeblks->fb_inum, nb, fs->fs_bsize); 8176 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, 8177 fs->fs_bsize, freeblks->fb_inum, 8178 freeblks->fb_vtype, &wkhd); 8179 } 8180 } 8181 if (goingaway) { 8182 bp->b_flags |= B_INVAL | B_NOCACHE; 8183 brelse(bp); 8184 } 8185 freedblocks = 0; 8186 if (level == 0) 8187 freedblocks = (nblocks * cnt); 8188 if (needj == 0) 8189 freedblocks += nblocks; 8190 freeblks_free(ump, freeblks, freedblocks); 8191 /* 8192 * If we are journaling set up the ref counts and offset so this 8193 * indirect can be completed when its children are free. 8194 */ 8195 if (needj) { 8196 ACQUIRE_LOCK(ump); 8197 freework->fw_off = i; 8198 freework->fw_ref += freedeps; 8199 freework->fw_ref -= NINDIR(fs) + 1; 8200 if (level == 0) 8201 freeblks->fb_cgwait += freedeps; 8202 if (freework->fw_ref == 0) 8203 freework_freeblock(freework); 8204 FREE_LOCK(ump); 8205 return; 8206 } 8207 /* 8208 * If we're not journaling we can free the indirect now. 8209 */ 8210 dbn = dbtofsb(fs, dbn); 8211 CTR3(KTR_SUJ, 8212 "indir_trunc 2: ino %d blkno %jd size %ld", 8213 freeblks->fb_inum, dbn, fs->fs_bsize); 8214 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, 8215 freeblks->fb_inum, freeblks->fb_vtype, NULL); 8216 /* Non SUJ softdep does single-threaded truncations. */ 8217 if (freework->fw_blkno == dbn) { 8218 freework->fw_state |= ALLCOMPLETE; 8219 ACQUIRE_LOCK(ump); 8220 handle_written_freework(freework); 8221 FREE_LOCK(ump); 8222 } 8223 return; 8224} 8225 8226/* 8227 * Cancel an allocindir when it is removed via truncation. When bp is not 8228 * NULL the indirect never appeared on disk and is scheduled to be freed 8229 * independently of the indir so we can more easily track journal work. 8230 */ 8231static void 8232cancel_allocindir(aip, bp, freeblks, trunc) 8233 struct allocindir *aip; 8234 struct buf *bp; 8235 struct freeblks *freeblks; 8236 int trunc; 8237{ 8238 struct indirdep *indirdep; 8239 struct freefrag *freefrag; 8240 struct newblk *newblk; 8241 8242 newblk = (struct newblk *)aip; 8243 LIST_REMOVE(aip, ai_next); 8244 /* 8245 * We must eliminate the pointer in bp if it must be freed on its 8246 * own due to partial truncate or pending journal work. 8247 */ 8248 if (bp && (trunc || newblk->nb_jnewblk)) { 8249 /* 8250 * Clear the pointer and mark the aip to be freed 8251 * directly if it never existed on disk. 8252 */ 8253 aip->ai_state |= DELAYEDFREE; 8254 indirdep = aip->ai_indirdep; 8255 if (indirdep->ir_state & UFS1FMT) 8256 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8257 else 8258 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8259 } 8260 /* 8261 * When truncating the previous pointer will be freed via 8262 * savedbp. Eliminate the freefrag which would dup free. 8263 */ 8264 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { 8265 newblk->nb_freefrag = NULL; 8266 if (freefrag->ff_jdep) 8267 cancel_jfreefrag( 8268 WK_JFREEFRAG(freefrag->ff_jdep)); 8269 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); 8270 WORKITEM_FREE(freefrag, D_FREEFRAG); 8271 } 8272 /* 8273 * If the journal hasn't been written the jnewblk must be passed 8274 * to the call to ffs_blkfree that reclaims the space. 
We accomplish 8275 * this by leaving the journal dependency on the newblk to be freed 8276 * when a freework is created in handle_workitem_freeblocks(). 8277 */ 8278 cancel_newblk(newblk, NULL, &freeblks->fb_jwork); 8279 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 8280} 8281 8282/* 8283 * Create the mkdir dependencies for . and .. in a new directory. Link them 8284 * in to a newdirblk so any subsequent additions are tracked properly. The 8285 * caller is responsible for adding the mkdir1 dependency to the journal 8286 * and updating id_mkdiradd. This function returns with the per-filesystem 8287 * lock held. 8288 */ 8289static struct mkdir * 8290setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) 8291 struct diradd *dap; 8292 ino_t newinum; 8293 ino_t dinum; 8294 struct buf *newdirbp; 8295 struct mkdir **mkdirp; 8296{ 8297 struct newblk *newblk; 8298 struct pagedep *pagedep; 8299 struct inodedep *inodedep; 8300 struct newdirblk *newdirblk = 0; 8301 struct mkdir *mkdir1, *mkdir2; 8302 struct worklist *wk; 8303 struct jaddref *jaddref; 8304 struct ufsmount *ump; 8305 struct mount *mp; 8306 8307 mp = dap->da_list.wk_mp; 8308 ump = VFSTOUFS(mp); 8309 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, 8310 M_SOFTDEP_FLAGS); 8311 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8312 LIST_INIT(&newdirblk->db_mkdir); 8313 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8314 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); 8315 mkdir1->md_state = ATTACHED | MKDIR_BODY; 8316 mkdir1->md_diradd = dap; 8317 mkdir1->md_jaddref = NULL; 8318 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8319 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); 8320 mkdir2->md_state = ATTACHED | MKDIR_PARENT; 8321 mkdir2->md_diradd = dap; 8322 mkdir2->md_jaddref = NULL; 8323 if (MOUNTEDSUJ(mp) == 0) { 8324 mkdir1->md_state |= DEPCOMPLETE; 8325 mkdir2->md_state |= DEPCOMPLETE; 8326 } 8327 /* 8328 * Dependency on "." and ".." being written to disk. 8329 */ 8330 mkdir1->md_buf = newdirbp; 8331 ACQUIRE_LOCK(VFSTOUFS(mp)); 8332 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs); 8333 /* 8334 * We must link the pagedep, allocdirect, and newdirblk for 8335 * the initial file page so the pointer to the new directory 8336 * is not written until the directory contents are live and 8337 * any subsequent additions are not marked live until the 8338 * block is reachable via the inode. 8339 */ 8340 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) 8341 panic("setup_newdir: lost pagedep"); 8342 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) 8343 if (wk->wk_type == D_ALLOCDIRECT) 8344 break; 8345 if (wk == NULL) 8346 panic("setup_newdir: lost allocdirect"); 8347 if (pagedep->pd_state & NEWBLOCK) 8348 panic("setup_newdir: NEWBLOCK already set"); 8349 newblk = WK_NEWBLK(wk); 8350 pagedep->pd_state |= NEWBLOCK; 8351 pagedep->pd_newdirblk = newdirblk; 8352 newdirblk->db_pagedep = pagedep; 8353 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8354 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); 8355 /* 8356 * Look up the inodedep for the parent directory so that we 8357 * can link mkdir2 into the pending dotdot jaddref or 8358 * the inode write if there is none. If the inode is 8359 * ALLCOMPLETE and no jaddref is present all dependencies have 8360 * been satisfied and mkdir2 can be freed. 
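 *
 * The three outcomes for mkdir2, condensed from the code below:
 *
 *	if (MOUNTEDSUJ(mp))			/* pair with dotdot jaddref */
 *		mkdir2->md_jaddref = jaddref;
 *	else if (inodedep == NULL ||
 *	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
 *		WORKITEM_FREE(mkdir2, D_MKDIR);	/* parent already stable */
 *	else
 *		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
 *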
8361 */ 8362 inodedep_lookup(mp, dinum, 0, &inodedep); 8363 if (MOUNTEDSUJ(mp)) { 8364 if (inodedep == NULL) 8365 panic("setup_newdir: Lost parent."); 8366 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8367 inoreflst); 8368 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && 8369 (jaddref->ja_state & MKDIR_PARENT), 8370 ("setup_newdir: bad dotdot jaddref %p", jaddref)); 8371 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8372 mkdir2->md_jaddref = jaddref; 8373 jaddref->ja_mkdir = mkdir2; 8374 } else if (inodedep == NULL || 8375 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 8376 dap->da_state &= ~MKDIR_PARENT; 8377 WORKITEM_FREE(mkdir2, D_MKDIR); 8378 mkdir2 = NULL; 8379 } else { 8380 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8381 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); 8382 } 8383 *mkdirp = mkdir2; 8384 8385 return (mkdir1); 8386} 8387 8388/* 8389 * Directory entry addition dependencies. 8390 * 8391 * When adding a new directory entry, the inode (with its incremented link 8392 * count) must be written to disk before the directory entry's pointer to it. 8393 * Also, if the inode is newly allocated, the corresponding freemap must be 8394 * updated (on disk) before the directory entry's pointer. These requirements 8395 * are met via undo/redo on the directory entry's pointer, which consists 8396 * simply of the inode number. 8397 * 8398 * As directory entries are added and deleted, the free space within a 8399 * directory block can become fragmented. The ufs filesystem will compact 8400 * a fragmented directory block to make space for a new entry. When this 8401 * occurs, the offsets of previously added entries change. Any "diradd" 8402 * dependency structures corresponding to these entries must be updated with 8403 * the new offsets. 8404 */ 8405 8406/* 8407 * This routine is called after the in-memory inode's link 8408 * count has been incremented, but before the directory entry's 8409 * pointer to the inode has been set. 8410 */ 8411int 8412softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 8413 struct buf *bp; /* buffer containing directory block */ 8414 struct inode *dp; /* inode for directory */ 8415 off_t diroffset; /* offset of new entry in directory */ 8416 ino_t newinum; /* inode referenced by new directory entry */ 8417 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 8418 int isnewblk; /* entry is in a newly allocated block */ 8419{ 8420 int offset; /* offset of new entry within directory block */ 8421 ufs_lbn_t lbn; /* block in directory containing new entry */ 8422 struct fs *fs; 8423 struct diradd *dap; 8424 struct newblk *newblk; 8425 struct pagedep *pagedep; 8426 struct inodedep *inodedep; 8427 struct newdirblk *newdirblk = 0; 8428 struct mkdir *mkdir1, *mkdir2; 8429 struct jaddref *jaddref; 8430 struct ufsmount *ump; 8431 struct mount *mp; 8432 int isindir; 8433 8434 ump = dp->i_ump; 8435 mp = UFSTOVFS(ump); 8436 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8437 ("softdep_setup_directory_add called on non-softdep filesystem")); 8438 /* 8439 * Whiteouts have no dependencies. 
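 * (Illustrative note: a whiteout entry names WINO and changes only
 * the name space, so there is no inode or bitmap write to order the
 * entry against, which is why the check below returns immediately.)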
8440 */ 8441 if (newinum == WINO) { 8442 if (newdirbp != NULL) 8443 bdwrite(newdirbp); 8444 return (0); 8445 } 8446 jaddref = NULL; 8447 mkdir1 = mkdir2 = NULL; 8448 fs = dp->i_fs; 8449 lbn = lblkno(fs, diroffset); 8450 offset = blkoff(fs, diroffset); 8451 dap = malloc(sizeof(struct diradd), M_DIRADD, 8452 M_SOFTDEP_FLAGS|M_ZERO); 8453 workitem_alloc(&dap->da_list, D_DIRADD, mp); 8454 dap->da_offset = offset; 8455 dap->da_newinum = newinum; 8456 dap->da_state = ATTACHED; 8457 LIST_INIT(&dap->da_jwork); 8458 isindir = bp->b_lblkno >= NDADDR; 8459 if (isnewblk && 8460 (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { 8461 newdirblk = malloc(sizeof(struct newdirblk), 8462 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 8463 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8464 LIST_INIT(&newdirblk->db_mkdir); 8465 } 8466 /* 8467 * If we're creating a new directory setup the dependencies and set 8468 * the dap state to wait for them. Otherwise it's COMPLETE and 8469 * we can move on. 8470 */ 8471 if (newdirbp == NULL) { 8472 dap->da_state |= DEPCOMPLETE; 8473 ACQUIRE_LOCK(ump); 8474 } else { 8475 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 8476 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, 8477 &mkdir2); 8478 } 8479 /* 8480 * Link into parent directory pagedep to await its being written. 8481 */ 8482 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); 8483#ifdef DEBUG 8484 if (diradd_lookup(pagedep, offset) != NULL) 8485 panic("softdep_setup_directory_add: %p already at off %d\n", 8486 diradd_lookup(pagedep, offset), offset); 8487#endif 8488 dap->da_pagedep = pagedep; 8489 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 8490 da_pdlist); 8491 inodedep_lookup(mp, newinum, DEPALLOC, &inodedep); 8492 /* 8493 * If we're journaling, link the diradd into the jaddref so it 8494 * may be completed after the journal entry is written. Otherwise, 8495 * link the diradd into its inodedep. If the inode is not yet 8496 * written place it on the bufwait list, otherwise do the post-inode 8497 * write processing to put it on the id_pendinghd list. 8498 */ 8499 if (MOUNTEDSUJ(mp)) { 8500 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8501 inoreflst); 8502 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 8503 ("softdep_setup_directory_add: bad jaddref %p", jaddref)); 8504 jaddref->ja_diroff = diroffset; 8505 jaddref->ja_diradd = dap; 8506 add_to_journal(&jaddref->ja_list); 8507 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 8508 diradd_inode_written(dap, inodedep); 8509 else 8510 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 8511 /* 8512 * Add the journal entries for . and .. links now that the primary 8513 * link is written. 8514 */ 8515 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) { 8516 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 8517 inoreflst, if_deps); 8518 KASSERT(jaddref != NULL && 8519 jaddref->ja_ino == jaddref->ja_parent && 8520 (jaddref->ja_state & MKDIR_BODY), 8521 ("softdep_setup_directory_add: bad dot jaddref %p", 8522 jaddref)); 8523 mkdir1->md_jaddref = jaddref; 8524 jaddref->ja_mkdir = mkdir1; 8525 /* 8526 * It is important that the dotdot journal entry 8527 * is added prior to the dot entry since dot writes 8528 * both the dot and dotdot links. These both must 8529 * be added after the primary link for the journal 8530 * to remain consistent. 
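 *
 * Resulting journal order, condensed (by this point `jaddref' has
 * been advanced to the dot entry; labels are illustrative):
 *
 *	1) primary link jaddref		(added earlier above)
 *	2) dotdot jaddref		(mkdir2->md_jaddref, first below)
 *	3) dot jaddref			(jaddref, second below)
 *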
8531 */ 8532 add_to_journal(&mkdir2->md_jaddref->ja_list); 8533 add_to_journal(&jaddref->ja_list); 8534 } 8535 /* 8536 * If we are adding a new directory remember this diradd so that if 8537 * we rename it we can keep the dot and dotdot dependencies. If 8538 * we are adding a new name for an inode that has a mkdiradd we 8539 * must be in rename and we have to move the dot and dotdot 8540 * dependencies to this new name. The old name is being orphaned 8541 * soon. 8542 */ 8543 if (mkdir1 != NULL) { 8544 if (inodedep->id_mkdiradd != NULL) 8545 panic("softdep_setup_directory_add: Existing mkdir"); 8546 inodedep->id_mkdiradd = dap; 8547 } else if (inodedep->id_mkdiradd) 8548 merge_diradd(inodedep, dap); 8549 if (newdirblk) { 8550 /* 8551 * There is nothing to do if we are already tracking 8552 * this block. 8553 */ 8554 if ((pagedep->pd_state & NEWBLOCK) != 0) { 8555 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 8556 FREE_LOCK(ump); 8557 return (0); 8558 } 8559 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk) 8560 == 0) 8561 panic("softdep_setup_directory_add: lost entry"); 8562 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8563 pagedep->pd_state |= NEWBLOCK; 8564 pagedep->pd_newdirblk = newdirblk; 8565 newdirblk->db_pagedep = pagedep; 8566 FREE_LOCK(ump); 8567 /* 8568 * If we extended into an indirect signal direnter to sync. 8569 */ 8570 if (isindir) 8571 return (1); 8572 return (0); 8573 } 8574 FREE_LOCK(ump); 8575 return (0); 8576} 8577 8578/* 8579 * This procedure is called to change the offset of a directory 8580 * entry when compacting a directory block which must be owned 8581 * exclusively by the caller. Note that the actual entry movement 8582 * must be done in this procedure to ensure that no I/O completions 8583 * occur while the move is in progress. 8584 */ 8585void 8586softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) 8587 struct buf *bp; /* Buffer holding directory block. */ 8588 struct inode *dp; /* inode for directory */ 8589 caddr_t base; /* address of dp->i_offset */ 8590 caddr_t oldloc; /* address of old directory location */ 8591 caddr_t newloc; /* address of new directory location */ 8592 int entrysize; /* size of directory entry */ 8593{ 8594 int offset, oldoffset, newoffset; 8595 struct pagedep *pagedep; 8596 struct jmvref *jmvref; 8597 struct diradd *dap; 8598 struct direct *de; 8599 struct mount *mp; 8600 ufs_lbn_t lbn; 8601 int flags; 8602 8603 mp = UFSTOVFS(dp->i_ump); 8604 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8605 ("softdep_change_directoryentry_offset called on " 8606 "non-softdep filesystem")); 8607 de = (struct direct *)oldloc; 8608 jmvref = NULL; 8609 flags = 0; 8610 /* 8611 * Moves are always journaled as it would be too complex to 8612 * determine if any affected adds or removes are present in the 8613 * journal. 
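 *
 * Compaction example (illustrative offsets): an entry sliding from
 * directory offset 0x1a0 down to 0x188 within its block must drag
 * its tracking structure along, which is what the body below does:
 *
 *	dap = diradd_lookup(pagedep, oldoffset);
 *	dap->da_offset = newoffset;	/* and rehash via DIRADDHASH() */
 *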
8614 */ 8615 if (MOUNTEDSUJ(mp)) { 8616 flags = DEPALLOC; 8617 jmvref = newjmvref(dp, de->d_ino, 8618 dp->i_offset + (oldloc - base), 8619 dp->i_offset + (newloc - base)); 8620 } 8621 lbn = lblkno(dp->i_fs, dp->i_offset); 8622 offset = blkoff(dp->i_fs, dp->i_offset); 8623 oldoffset = offset + (oldloc - base); 8624 newoffset = offset + (newloc - base); 8625 ACQUIRE_LOCK(dp->i_ump); 8626 if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0) 8627 goto done; 8628 dap = diradd_lookup(pagedep, oldoffset); 8629 if (dap) { 8630 dap->da_offset = newoffset; 8631 newoffset = DIRADDHASH(newoffset); 8632 oldoffset = DIRADDHASH(oldoffset); 8633 if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE && 8634 newoffset != oldoffset) { 8635 LIST_REMOVE(dap, da_pdlist); 8636 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset], 8637 dap, da_pdlist); 8638 } 8639 } 8640done: 8641 if (jmvref) { 8642 jmvref->jm_pagedep = pagedep; 8643 LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps); 8644 add_to_journal(&jmvref->jm_list); 8645 } 8646 bcopy(oldloc, newloc, entrysize); 8647 FREE_LOCK(dp->i_ump); 8648} 8649 8650/* 8651 * Move the mkdir dependencies and journal work from one diradd to another 8652 * when renaming a directory. The new name must depend on the mkdir deps 8653 * completing as the old name did. Directories can only have one valid link 8654 * at a time so one must be canonical. 8655 */ 8656static void 8657merge_diradd(inodedep, newdap) 8658 struct inodedep *inodedep; 8659 struct diradd *newdap; 8660{ 8661 struct diradd *olddap; 8662 struct mkdir *mkdir, *nextmd; 8663 struct ufsmount *ump; 8664 short state; 8665 8666 olddap = inodedep->id_mkdiradd; 8667 inodedep->id_mkdiradd = newdap; 8668 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8669 newdap->da_state &= ~DEPCOMPLETE; 8670 ump = VFSTOUFS(inodedep->id_list.wk_mp); 8671 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8672 mkdir = nextmd) { 8673 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8674 if (mkdir->md_diradd != olddap) 8675 continue; 8676 mkdir->md_diradd = newdap; 8677 state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY); 8678 newdap->da_state |= state; 8679 olddap->da_state &= ~state; 8680 if ((olddap->da_state & 8681 (MKDIR_PARENT | MKDIR_BODY)) == 0) 8682 break; 8683 } 8684 if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8685 panic("merge_diradd: unfound ref"); 8686 } 8687 /* 8688 * Any mkdir related journal items are not safe to be freed until 8689 * the new name is stable. 8690 */ 8691 jwork_move(&newdap->da_jwork, &olddap->da_jwork); 8692 olddap->da_state |= DEPCOMPLETE; 8693 complete_diradd(olddap); 8694} 8695 8696/* 8697 * Move the diradd to the pending list when all diradd dependencies are 8698 * complete. 8699 */ 8700static void 8701complete_diradd(dap) 8702 struct diradd *dap; 8703{ 8704 struct pagedep *pagedep; 8705 8706 if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) { 8707 if (dap->da_state & DIRCHG) 8708 pagedep = dap->da_previous->dm_pagedep; 8709 else 8710 pagedep = dap->da_pagedep; 8711 LIST_REMOVE(dap, da_pdlist); 8712 LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist); 8713 } 8714} 8715 8716/* 8717 * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal 8718 * add entries and conditonally journal the remove. 
*/
8720static void
8721cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8722 struct diradd *dap;
8723 struct dirrem *dirrem;
8724 struct jremref *jremref;
8725 struct jremref *dotremref;
8726 struct jremref *dotdotremref;
8727{
8728 struct inodedep *inodedep;
8729 struct jaddref *jaddref;
8730 struct inoref *inoref;
8731 struct ufsmount *ump;
8732 struct mkdir *mkdir;
8733
8734 /*
8735 * If no remove references were allocated we're on a non-journaled
8736 * filesystem and can skip the cancel step.
8737 */
8738 if (jremref == NULL) {
8739 free_diradd(dap, NULL);
8740 return;
8741 }
8742 /*
8743 * Cancel the primary name and free it if it does not require
8744 * journaling.
8745 */
8746 if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8747 0, &inodedep) != 0) {
8748 /* Abort the addref that references this diradd. */
8749 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8750 if (inoref->if_list.wk_type != D_JADDREF)
8751 continue;
8752 jaddref = (struct jaddref *)inoref;
8753 if (jaddref->ja_diradd != dap)
8754 continue;
8755 if (cancel_jaddref(jaddref, inodedep,
8756 &dirrem->dm_jwork) == 0) {
8757 free_jremref(jremref);
8758 jremref = NULL;
8759 }
8760 break;
8761 }
8762 }
8763 /*
8764 * Cancel subordinate names and free them if they do not require
8765 * journaling.
8766 */
8767 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8768 ump = VFSTOUFS(dap->da_list.wk_mp);
8769 LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
8770 if (mkdir->md_diradd != dap)
8771 continue;
8772 if ((jaddref = mkdir->md_jaddref) == NULL)
8773 continue;
8774 mkdir->md_jaddref = NULL;
8775 if (mkdir->md_state & MKDIR_PARENT) {
8776 if (cancel_jaddref(jaddref, NULL,
8777 &dirrem->dm_jwork) == 0) {
8778 free_jremref(dotdotremref);
8779 dotdotremref = NULL;
8780 }
8781 } else {
8782 if (cancel_jaddref(jaddref, inodedep,
8783 &dirrem->dm_jwork) == 0) {
8784 free_jremref(dotremref);
8785 dotremref = NULL;
8786 }
8787 }
8788 }
8789 }
8790
8791 if (jremref)
8792 journal_jremref(dirrem, jremref, inodedep);
8793 if (dotremref)
8794 journal_jremref(dirrem, dotremref, inodedep);
8795 if (dotdotremref)
8796 journal_jremref(dirrem, dotdotremref, NULL);
8797 jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8798 free_diradd(dap, &dirrem->dm_jwork);
8799}
8800
8801/*
8802 * Free a diradd dependency structure. This routine must be called
8803 * with splbio interrupts blocked.
8804 */ 8805static void 8806free_diradd(dap, wkhd) 8807 struct diradd *dap; 8808 struct workhead *wkhd; 8809{ 8810 struct dirrem *dirrem; 8811 struct pagedep *pagedep; 8812 struct inodedep *inodedep; 8813 struct mkdir *mkdir, *nextmd; 8814 struct ufsmount *ump; 8815 8816 ump = VFSTOUFS(dap->da_list.wk_mp); 8817 LOCK_OWNED(ump); 8818 LIST_REMOVE(dap, da_pdlist); 8819 if (dap->da_state & ONWORKLIST) 8820 WORKLIST_REMOVE(&dap->da_list); 8821 if ((dap->da_state & DIRCHG) == 0) { 8822 pagedep = dap->da_pagedep; 8823 } else { 8824 dirrem = dap->da_previous; 8825 pagedep = dirrem->dm_pagedep; 8826 dirrem->dm_dirinum = pagedep->pd_ino; 8827 dirrem->dm_state |= COMPLETE; 8828 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 8829 add_to_worklist(&dirrem->dm_list, 0); 8830 } 8831 if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum, 8832 0, &inodedep) != 0) 8833 if (inodedep->id_mkdiradd == dap) 8834 inodedep->id_mkdiradd = NULL; 8835 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8836 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8837 mkdir = nextmd) { 8838 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8839 if (mkdir->md_diradd != dap) 8840 continue; 8841 dap->da_state &= 8842 ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 8843 LIST_REMOVE(mkdir, md_mkdirs); 8844 if (mkdir->md_state & ONWORKLIST) 8845 WORKLIST_REMOVE(&mkdir->md_list); 8846 if (mkdir->md_jaddref != NULL) 8847 panic("free_diradd: Unexpected jaddref"); 8848 WORKITEM_FREE(mkdir, D_MKDIR); 8849 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 8850 break; 8851 } 8852 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8853 panic("free_diradd: unfound ref"); 8854 } 8855 if (inodedep) 8856 free_inodedep(inodedep); 8857 /* 8858 * Free any journal segments waiting for the directory write. 8859 */ 8860 handle_jwork(&dap->da_jwork); 8861 WORKITEM_FREE(dap, D_DIRADD); 8862} 8863 8864/* 8865 * Directory entry removal dependencies. 8866 * 8867 * When removing a directory entry, the entry's inode pointer must be 8868 * zero'ed on disk before the corresponding inode's link count is decremented 8869 * (possibly freeing the inode for re-use). This dependency is handled by 8870 * updating the directory entry but delaying the inode count reduction until 8871 * after the directory block has been written to disk. After this point, the 8872 * inode count can be decremented whenever it is convenient. 8873 */ 8874 8875/* 8876 * This routine should be called immediately after removing 8877 * a directory entry. The inode's link count should not be 8878 * decremented by the calling procedure -- the soft updates 8879 * code will do this task when it is safe. 8880 */ 8881void 8882softdep_setup_remove(bp, dp, ip, isrmdir) 8883 struct buf *bp; /* buffer containing directory block */ 8884 struct inode *dp; /* inode for the directory being modified */ 8885 struct inode *ip; /* inode for directory entry being removed */ 8886 int isrmdir; /* indicates if doing RMDIR */ 8887{ 8888 struct dirrem *dirrem, *prevdirrem; 8889 struct inodedep *inodedep; 8890 int direct; 8891 8892 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 8893 ("softdep_setup_remove called on non-softdep filesystem")); 8894 /* 8895 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want 8896 * newdirrem() to setup the full directory remove which requires 8897 * isrmdir > 1. 8898 */ 8899 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 8900 /* 8901 * Add the dirrem to the inodedep's pending remove list for quick 8902 * discovery later. 
8903 */ 8904 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 8905 &inodedep) == 0) 8906 panic("softdep_setup_remove: Lost inodedep."); 8907 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 8908 dirrem->dm_state |= ONDEPLIST; 8909 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 8910 8911 /* 8912 * If the COMPLETE flag is clear, then there were no active 8913 * entries and we want to roll back to a zeroed entry until 8914 * the new inode is committed to disk. If the COMPLETE flag is 8915 * set then we have deleted an entry that never made it to 8916 * disk. If the entry we deleted resulted from a name change, 8917 * then the old name still resides on disk. We cannot delete 8918 * its inode (returned to us in prevdirrem) until the zeroed 8919 * directory entry gets to disk. The new inode has never been 8920 * referenced on the disk, so can be deleted immediately. 8921 */ 8922 if ((dirrem->dm_state & COMPLETE) == 0) { 8923 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem, 8924 dm_next); 8925 FREE_LOCK(ip->i_ump); 8926 } else { 8927 if (prevdirrem != NULL) 8928 LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, 8929 prevdirrem, dm_next); 8930 dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino; 8931 direct = LIST_EMPTY(&dirrem->dm_jremrefhd); 8932 FREE_LOCK(ip->i_ump); 8933 if (direct) 8934 handle_workitem_remove(dirrem, 0); 8935 } 8936} 8937 8938/* 8939 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the 8940 * pd_pendinghd list of a pagedep. 8941 */ 8942static struct diradd * 8943diradd_lookup(pagedep, offset) 8944 struct pagedep *pagedep; 8945 int offset; 8946{ 8947 struct diradd *dap; 8948 8949 LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist) 8950 if (dap->da_offset == offset) 8951 return (dap); 8952 LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) 8953 if (dap->da_offset == offset) 8954 return (dap); 8955 return (NULL); 8956} 8957 8958/* 8959 * Search for a .. diradd dependency in a directory that is being removed. 8960 * If the directory was renamed to a new parent we have a diradd rather 8961 * than a mkdir for the .. entry. We need to cancel it now before 8962 * it is found in truncate(). 8963 */ 8964static struct jremref * 8965cancel_diradd_dotdot(ip, dirrem, jremref) 8966 struct inode *ip; 8967 struct dirrem *dirrem; 8968 struct jremref *jremref; 8969{ 8970 struct pagedep *pagedep; 8971 struct diradd *dap; 8972 struct worklist *wk; 8973 8974 if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0, 8975 &pagedep) == 0) 8976 return (jremref); 8977 dap = diradd_lookup(pagedep, DOTDOT_OFFSET); 8978 if (dap == NULL) 8979 return (jremref); 8980 cancel_diradd(dap, dirrem, jremref, NULL, NULL); 8981 /* 8982 * Mark any journal work as belonging to the parent so it is freed 8983 * with the .. reference. 8984 */ 8985 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) 8986 wk->wk_state |= MKDIR_PARENT; 8987 return (NULL); 8988} 8989 8990/* 8991 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to 8992 * replace it with a dirrem/diradd pair as a result of re-parenting a 8993 * directory. This ensures that we don't simultaneously have a mkdir and 8994 * a diradd for the same .. entry. 
8995 */ 8996static struct jremref * 8997cancel_mkdir_dotdot(ip, dirrem, jremref) 8998 struct inode *ip; 8999 struct dirrem *dirrem; 9000 struct jremref *jremref; 9001{ 9002 struct inodedep *inodedep; 9003 struct jaddref *jaddref; 9004 struct ufsmount *ump; 9005 struct mkdir *mkdir; 9006 struct diradd *dap; 9007 9008 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 9009 &inodedep) == 0) 9010 return (jremref); 9011 dap = inodedep->id_mkdiradd; 9012 if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0) 9013 return (jremref); 9014 ump = VFSTOUFS(inodedep->id_list.wk_mp); 9015 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 9016 mkdir = LIST_NEXT(mkdir, md_mkdirs)) 9017 if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT) 9018 break; 9019 if (mkdir == NULL) 9020 panic("cancel_mkdir_dotdot: Unable to find mkdir\n"); 9021 if ((jaddref = mkdir->md_jaddref) != NULL) { 9022 mkdir->md_jaddref = NULL; 9023 jaddref->ja_state &= ~MKDIR_PARENT; 9024 if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0, 9025 &inodedep) == 0) 9026 panic("cancel_mkdir_dotdot: Lost parent inodedep"); 9027 if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) { 9028 journal_jremref(dirrem, jremref, inodedep); 9029 jremref = NULL; 9030 } 9031 } 9032 if (mkdir->md_state & ONWORKLIST) 9033 WORKLIST_REMOVE(&mkdir->md_list); 9034 mkdir->md_state |= ALLCOMPLETE; 9035 complete_mkdir(mkdir); 9036 return (jremref); 9037} 9038 9039static void 9040journal_jremref(dirrem, jremref, inodedep) 9041 struct dirrem *dirrem; 9042 struct jremref *jremref; 9043 struct inodedep *inodedep; 9044{ 9045 9046 if (inodedep == NULL) 9047 if (inodedep_lookup(jremref->jr_list.wk_mp, 9048 jremref->jr_ref.if_ino, 0, &inodedep) == 0) 9049 panic("journal_jremref: Lost inodedep"); 9050 LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps); 9051 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 9052 add_to_journal(&jremref->jr_list); 9053} 9054 9055static void 9056dirrem_journal(dirrem, jremref, dotremref, dotdotremref) 9057 struct dirrem *dirrem; 9058 struct jremref *jremref; 9059 struct jremref *dotremref; 9060 struct jremref *dotdotremref; 9061{ 9062 struct inodedep *inodedep; 9063 9064 9065 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, 9066 &inodedep) == 0) 9067 panic("dirrem_journal: Lost inodedep"); 9068 journal_jremref(dirrem, jremref, inodedep); 9069 if (dotremref) 9070 journal_jremref(dirrem, dotremref, inodedep); 9071 if (dotdotremref) 9072 journal_jremref(dirrem, dotdotremref, NULL); 9073} 9074 9075/* 9076 * Allocate a new dirrem if appropriate and return it along with 9077 * its associated pagedep. Called without a lock, returns with lock. 9078 */ 9079static struct dirrem * 9080newdirrem(bp, dp, ip, isrmdir, prevdirremp) 9081 struct buf *bp; /* buffer containing directory block */ 9082 struct inode *dp; /* inode for the directory being modified */ 9083 struct inode *ip; /* inode for directory entry being removed */ 9084 int isrmdir; /* indicates if doing RMDIR */ 9085 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 9086{ 9087 int offset; 9088 ufs_lbn_t lbn; 9089 struct diradd *dap; 9090 struct dirrem *dirrem; 9091 struct pagedep *pagedep; 9092 struct jremref *jremref; 9093 struct jremref *dotremref; 9094 struct jremref *dotdotremref; 9095 struct vnode *dvp; 9096 9097 /* 9098 * Whiteouts have no deletion dependencies. 
9099 */ 9100 if (ip == NULL) 9101 panic("newdirrem: whiteout"); 9102 dvp = ITOV(dp); 9103 /* 9104 * If the system is over its limit and our filesystem is 9105 * responsible for more than our share of that usage and 9106 * we are not a snapshot, request some inodedep cleanup. 9107 * Limiting the number of dirrem structures will also limit 9108 * the number of freefile and freeblks structures. 9109 */ 9110 ACQUIRE_LOCK(ip->i_ump); 9111 if (!IS_SNAPSHOT(ip) && softdep_excess_dirrem(ip->i_ump)) 9112 schedule_cleanup(ITOV(dp)->v_mount); 9113 else 9114 FREE_LOCK(ip->i_ump); 9115 dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS | 9116 M_ZERO); 9117 workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount); 9118 LIST_INIT(&dirrem->dm_jremrefhd); 9119 LIST_INIT(&dirrem->dm_jwork); 9120 dirrem->dm_state = isrmdir ? RMDIR : 0; 9121 dirrem->dm_oldinum = ip->i_number; 9122 *prevdirremp = NULL; 9123 /* 9124 * Allocate remove reference structures to track journal write 9125 * dependencies. We will always have one for the link and 9126 * when doing directories we will always have one more for dot. 9127 * When renaming a directory we skip the dotdot link change so 9128 * this is not needed. 9129 */ 9130 jremref = dotremref = dotdotremref = NULL; 9131 if (DOINGSUJ(dvp)) { 9132 if (isrmdir) { 9133 jremref = newjremref(dirrem, dp, ip, dp->i_offset, 9134 ip->i_effnlink + 2); 9135 dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET, 9136 ip->i_effnlink + 1); 9137 dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET, 9138 dp->i_effnlink + 1); 9139 dotdotremref->jr_state |= MKDIR_PARENT; 9140 } else 9141 jremref = newjremref(dirrem, dp, ip, dp->i_offset, 9142 ip->i_effnlink + 1); 9143 } 9144 ACQUIRE_LOCK(ip->i_ump); 9145 lbn = lblkno(dp->i_fs, dp->i_offset); 9146 offset = blkoff(dp->i_fs, dp->i_offset); 9147 pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC, 9148 &pagedep); 9149 dirrem->dm_pagedep = pagedep; 9150 dirrem->dm_offset = offset; 9151 /* 9152 * If we're renaming a .. link to a new directory, cancel any 9153 * existing MKDIR_PARENT mkdir. If it has already been canceled 9154 * the jremref is preserved for any potential diradd in this 9155 * location. This can not coincide with a rmdir. 9156 */ 9157 if (dp->i_offset == DOTDOT_OFFSET) { 9158 if (isrmdir) 9159 panic("newdirrem: .. directory change during remove?"); 9160 jremref = cancel_mkdir_dotdot(dp, dirrem, jremref); 9161 } 9162 /* 9163 * If we're removing a directory search for the .. dependency now and 9164 * cancel it. Any pending journal work will be added to the dirrem 9165 * to be completed when the workitem remove completes. 9166 */ 9167 if (isrmdir) 9168 dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref); 9169 /* 9170 * Check for a diradd dependency for the same directory entry. 9171 * If present, then both dependencies become obsolete and can 9172 * be de-allocated. 9173 */ 9174 dap = diradd_lookup(pagedep, offset); 9175 if (dap == NULL) { 9176 /* 9177 * Link the jremref structures into the dirrem so they are 9178 * written prior to the pagedep. 9179 */ 9180 if (jremref) 9181 dirrem_journal(dirrem, jremref, dotremref, 9182 dotdotremref); 9183 return (dirrem); 9184 } 9185 /* 9186 * Must be ATTACHED at this point. 
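 *
 * (An aside on the jremref nlink values chosen earlier in this
 * routine: a directory is referenced by its parent's entry, by its
 * own ".", and by one ".." per child subdirectory.  Since the caller
 * maintains i_effnlink and has already accounted for this remove,
 * the pre-remove link counts are reconstructed as ip->i_effnlink + 2
 * for the victim's name, ip->i_effnlink + 1 for its ".", and
 * dp->i_effnlink + 1 for the parent's lost "..".)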
9187 */ 9188 if ((dap->da_state & ATTACHED) == 0) 9189 panic("newdirrem: not ATTACHED"); 9190 if (dap->da_newinum != ip->i_number) 9191 panic("newdirrem: inum %ju should be %ju", 9192 (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum); 9193 /* 9194 * If we are deleting a changed name that never made it to disk, 9195 * then return the dirrem describing the previous inode (which 9196 * represents the inode currently referenced from this entry on disk). 9197 */ 9198 if ((dap->da_state & DIRCHG) != 0) { 9199 *prevdirremp = dap->da_previous; 9200 dap->da_state &= ~DIRCHG; 9201 dap->da_pagedep = pagedep; 9202 } 9203 /* 9204 * We are deleting an entry that never made it to disk. 9205 * Mark it COMPLETE so we can delete its inode immediately. 9206 */ 9207 dirrem->dm_state |= COMPLETE; 9208 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref); 9209#ifdef SUJ_DEBUG 9210 if (isrmdir == 0) { 9211 struct worklist *wk; 9212 9213 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) 9214 if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT)) 9215 panic("bad wk %p (0x%X)\n", wk, wk->wk_state); 9216 } 9217#endif 9218 9219 return (dirrem); 9220} 9221 9222/* 9223 * Directory entry change dependencies. 9224 * 9225 * Changing an existing directory entry requires that an add operation 9226 * be completed first followed by a deletion. The semantics for the addition 9227 * are identical to the description of adding a new entry above except 9228 * that the rollback is to the old inode number rather than zero. Once 9229 * the addition dependency is completed, the removal is done as described 9230 * in the removal routine above. 9231 */ 9232 9233/* 9234 * This routine should be called immediately after changing 9235 * a directory entry. The inode's link count should not be 9236 * decremented by the calling procedure -- the soft updates 9237 * code will perform this task when it is safe. 9238 */ 9239void 9240softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 9241 struct buf *bp; /* buffer containing directory block */ 9242 struct inode *dp; /* inode for the directory being modified */ 9243 struct inode *ip; /* inode for directory entry being removed */ 9244 ino_t newinum; /* new inode number for changed entry */ 9245 int isrmdir; /* indicates if doing RMDIR */ 9246{ 9247 int offset; 9248 struct diradd *dap = NULL; 9249 struct dirrem *dirrem, *prevdirrem; 9250 struct pagedep *pagedep; 9251 struct inodedep *inodedep; 9252 struct jaddref *jaddref; 9253 struct mount *mp; 9254 9255 offset = blkoff(dp->i_fs, dp->i_offset); 9256 mp = UFSTOVFS(dp->i_ump); 9257 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 9258 ("softdep_setup_directory_change called on non-softdep filesystem")); 9259 9260 /* 9261 * Whiteouts do not need diradd dependencies. 9262 */ 9263 if (newinum != WINO) { 9264 dap = malloc(sizeof(struct diradd), 9265 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 9266 workitem_alloc(&dap->da_list, D_DIRADD, mp); 9267 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 9268 dap->da_offset = offset; 9269 dap->da_newinum = newinum; 9270 LIST_INIT(&dap->da_jwork); 9271 } 9272 9273 /* 9274 * Allocate a new dirrem and ACQUIRE_LOCK. 
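 *
 * (Recall from the block comment above: for a changed entry the
 * rollback is to the old inode number rather than to zero.  The
 * dirrem allocated below is what the DIRCHG diradd built above will
 * remember, through da_previous, as the old on-disk inode.)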
9275 */ 9276 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 9277 pagedep = dirrem->dm_pagedep; 9278 /* 9279 * The possible values for isrmdir: 9280 * 0 - non-directory file rename 9281 * 1 - directory rename within same directory 9282 * inum - directory rename to new directory of given inode number 9283 * When renaming to a new directory, we are both deleting and 9284 * creating a new directory entry, so the link count on the new 9285 * directory should not change. Thus we do not need the followup 9286 * dirrem which is usually done in handle_workitem_remove. We set 9287 * the DIRCHG flag to tell handle_workitem_remove to skip the 9288 * followup dirrem. 9289 */ 9290 if (isrmdir > 1) 9291 dirrem->dm_state |= DIRCHG; 9292 9293 /* 9294 * Whiteouts have no additional dependencies, 9295 * so just put the dirrem on the correct list. 9296 */ 9297 if (newinum == WINO) { 9298 if ((dirrem->dm_state & COMPLETE) == 0) { 9299 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 9300 dm_next); 9301 } else { 9302 dirrem->dm_dirinum = pagedep->pd_ino; 9303 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 9304 add_to_worklist(&dirrem->dm_list, 0); 9305 } 9306 FREE_LOCK(dp->i_ump); 9307 return; 9308 } 9309 /* 9310 * Add the dirrem to the inodedep's pending remove list for quick 9311 * discovery later. A valid nlinkdelta ensures that this lookup 9312 * will not fail. 9313 */ 9314 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) 9315 panic("softdep_setup_directory_change: Lost inodedep."); 9316 dirrem->dm_state |= ONDEPLIST; 9317 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 9318 9319 /* 9320 * If the COMPLETE flag is clear, then there were no active 9321 * entries and we want to roll back to the previous inode until 9322 * the new inode is committed to disk. If the COMPLETE flag is 9323 * set, then we have deleted an entry that never made it to disk. 9324 * If the entry we deleted resulted from a name change, then the old 9325 * inode reference still resides on disk. Any rollback that we do 9326 * needs to be to that old inode (returned to us in prevdirrem). If 9327 * the entry we deleted resulted from a create, then there is 9328 * no entry on the disk, so we want to roll back to zero rather 9329 * than the uncommitted inode. In either of the COMPLETE cases we 9330 * want to immediately free the unwritten and unreferenced inode. 9331 */ 9332 if ((dirrem->dm_state & COMPLETE) == 0) { 9333 dap->da_previous = dirrem; 9334 } else { 9335 if (prevdirrem != NULL) { 9336 dap->da_previous = prevdirrem; 9337 } else { 9338 dap->da_state &= ~DIRCHG; 9339 dap->da_pagedep = pagedep; 9340 } 9341 dirrem->dm_dirinum = pagedep->pd_ino; 9342 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 9343 add_to_worklist(&dirrem->dm_list, 0); 9344 } 9345 /* 9346 * Lookup the jaddref for this journal entry. We must finish 9347 * initializing it and make the diradd write dependent on it. 9348 * If we're not journaling, put it on the id_bufwait list if the 9349 * inode is not yet written. If it is written, do the post-inode 9350 * write processing to put it on the id_pendinghd list. 
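 *
 * In outline, the disposition of the new diradd performed below is:
 *
 *	journaling (SUJ)	-> pd_diraddhd[] and add_to_journal()
 *	inode already written	-> pd_pendinghd and id_pendinghd
 *	inode not yet written	-> pd_diraddhd[] and id_bufwait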
 */
	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
	if (MOUNTEDSUJ(mp)) {
		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
		    inoreflst);
		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
		    ("softdep_setup_directory_change: bad jaddref %p",
		    jaddref));
		jaddref->ja_diroff = dp->i_offset;
		jaddref->ja_diradd = dap;
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		add_to_journal(&jaddref->ja_list);
	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
		dap->da_state |= COMPLETE;
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
	} else {
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
	}
	/*
	 * If we're making a new name for a directory that has not been
	 * committed, we need to move the dot and dotdot references to
	 * this new name.
	 */
	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
		merge_diradd(inodedep, dap);
	FREE_LOCK(dp->i_ump);
}

/*
 * Called whenever the link count on an inode is changed.
 * It creates an inode dependency so that the new reference(s)
 * to the inode cannot be committed to disk until the updated
 * inode has been written.
 */
void
softdep_change_linkcnt(ip)
	struct inode *ip;	/* the inode with the increased link count */
{
	struct inodedep *inodedep;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_change_linkcnt called on non-softdep filesystem"));
	ACQUIRE_LOCK(ip->i_ump);
	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
	    &inodedep);
	if (ip->i_nlink < ip->i_effnlink)
		panic("softdep_change_linkcnt: bad delta");
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(ip->i_ump);
}

/*
 * Attach a sbdep dependency to the superblock buf so that we can keep
 * track of the head of the linked list of referenced but unlinked inodes.
 */
void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{
	struct sbdep *sbdep;
	struct worklist *wk;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_SBDEP)
			break;
	if (wk != NULL)
		return;
	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
	sbdep->sb_fs = fs;
	sbdep->sb_ump = ump;
	ACQUIRE_LOCK(ump);
	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
	FREE_LOCK(ump);
}

/*
 * Return the first unlinked inodedep which is ready to be the head of the
 * list. The inodedep and all those after it must have valid next pointers.
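 *
 * For context, the on-disk structure this in-memory list mirrors is a
 * singly linked chain of unlinked-but-still-referenced inodes:
 *
 *	fs_sujfree -> ino A (di_freelink) -> ino B (di_freelink) -> 0
 *
 * The UNLINKNEXT/UNLINKPREV bits record which of these on-disk
 * pointers have already been committed for a given inodedep.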
9438 */ 9439static struct inodedep * 9440first_unlinked_inodedep(ump) 9441 struct ufsmount *ump; 9442{ 9443 struct inodedep *inodedep; 9444 struct inodedep *idp; 9445 9446 LOCK_OWNED(ump); 9447 for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst); 9448 inodedep; inodedep = idp) { 9449 if ((inodedep->id_state & UNLINKNEXT) == 0) 9450 return (NULL); 9451 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9452 if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0) 9453 break; 9454 if ((inodedep->id_state & UNLINKPREV) == 0) 9455 break; 9456 } 9457 return (inodedep); 9458} 9459 9460/* 9461 * Set the sujfree unlinked head pointer prior to writing a superblock. 9462 */ 9463static void 9464initiate_write_sbdep(sbdep) 9465 struct sbdep *sbdep; 9466{ 9467 struct inodedep *inodedep; 9468 struct fs *bpfs; 9469 struct fs *fs; 9470 9471 bpfs = sbdep->sb_fs; 9472 fs = sbdep->sb_ump->um_fs; 9473 inodedep = first_unlinked_inodedep(sbdep->sb_ump); 9474 if (inodedep) { 9475 fs->fs_sujfree = inodedep->id_ino; 9476 inodedep->id_state |= UNLINKPREV; 9477 } else 9478 fs->fs_sujfree = 0; 9479 bpfs->fs_sujfree = fs->fs_sujfree; 9480} 9481 9482/* 9483 * After a superblock is written determine whether it must be written again 9484 * due to a changing unlinked list head. 9485 */ 9486static int 9487handle_written_sbdep(sbdep, bp) 9488 struct sbdep *sbdep; 9489 struct buf *bp; 9490{ 9491 struct inodedep *inodedep; 9492 struct fs *fs; 9493 9494 LOCK_OWNED(sbdep->sb_ump); 9495 fs = sbdep->sb_fs; 9496 /* 9497 * If the superblock doesn't match the in-memory list start over. 9498 */ 9499 inodedep = first_unlinked_inodedep(sbdep->sb_ump); 9500 if ((inodedep && fs->fs_sujfree != inodedep->id_ino) || 9501 (inodedep == NULL && fs->fs_sujfree != 0)) { 9502 bdirty(bp); 9503 return (1); 9504 } 9505 WORKITEM_FREE(sbdep, D_SBDEP); 9506 if (fs->fs_sujfree == 0) 9507 return (0); 9508 /* 9509 * Now that we have a record of this inode in stable store allow it 9510 * to be written to free up pending work. Inodes may see a lot of 9511 * write activity after they are unlinked which we must not hold up. 9512 */ 9513 for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { 9514 if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS) 9515 panic("handle_written_sbdep: Bad inodedep %p (0x%X)", 9516 inodedep, inodedep->id_state); 9517 if (inodedep->id_state & UNLINKONLIST) 9518 break; 9519 inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST; 9520 } 9521 9522 return (0); 9523} 9524 9525/* 9526 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list. 9527 */ 9528static void 9529unlinked_inodedep(mp, inodedep) 9530 struct mount *mp; 9531 struct inodedep *inodedep; 9532{ 9533 struct ufsmount *ump; 9534 9535 ump = VFSTOUFS(mp); 9536 LOCK_OWNED(ump); 9537 if (MOUNTEDSUJ(mp) == 0) 9538 return; 9539 ump->um_fs->fs_fmod = 1; 9540 if (inodedep->id_state & UNLINKED) 9541 panic("unlinked_inodedep: %p already unlinked\n", inodedep); 9542 inodedep->id_state |= UNLINKED; 9543 TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked); 9544} 9545 9546/* 9547 * Remove an inodedep from the unlinked inodedep list. This may require 9548 * disk writes if the inode has made it that far. 
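 *
 * Conceptually this is a splice out of the on-disk chain shown above:
 * to drop inode I we rewrite its predecessor's pointer,
 *
 *	predecessor is the superblock:	fs_sujfree = next(I)
 *	predecessor is inode P:		P.di_freelink = next(I)
 *
 * and only then clear I's in-memory linkage.  The looping below exists
 * because the lock must be dropped to get the predecessor's buffer,
 * and the list may change while we sleep.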
9549 */ 9550static void 9551clear_unlinked_inodedep(inodedep) 9552 struct inodedep *inodedep; 9553{ 9554 struct ufsmount *ump; 9555 struct inodedep *idp; 9556 struct inodedep *idn; 9557 struct fs *fs; 9558 struct buf *bp; 9559 ino_t ino; 9560 ino_t nino; 9561 ino_t pino; 9562 int error; 9563 9564 ump = VFSTOUFS(inodedep->id_list.wk_mp); 9565 fs = ump->um_fs; 9566 ino = inodedep->id_ino; 9567 error = 0; 9568 for (;;) { 9569 LOCK_OWNED(ump); 9570 KASSERT((inodedep->id_state & UNLINKED) != 0, 9571 ("clear_unlinked_inodedep: inodedep %p not unlinked", 9572 inodedep)); 9573 /* 9574 * If nothing has yet been written simply remove us from 9575 * the in memory list and return. This is the most common 9576 * case where handle_workitem_remove() loses the final 9577 * reference. 9578 */ 9579 if ((inodedep->id_state & UNLINKLINKS) == 0) 9580 break; 9581 /* 9582 * If we have a NEXT pointer and no PREV pointer we can simply 9583 * clear NEXT's PREV and remove ourselves from the list. Be 9584 * careful not to clear PREV if the superblock points at 9585 * next as well. 9586 */ 9587 idn = TAILQ_NEXT(inodedep, id_unlinked); 9588 if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) { 9589 if (idn && fs->fs_sujfree != idn->id_ino) 9590 idn->id_state &= ~UNLINKPREV; 9591 break; 9592 } 9593 /* 9594 * Here we have an inodedep which is actually linked into 9595 * the list. We must remove it by forcing a write to the 9596 * link before us, whether it be the superblock or an inode. 9597 * Unfortunately the list may change while we're waiting 9598 * on the buf lock for either resource so we must loop until 9599 * we lock the right one. If both the superblock and an 9600 * inode point to this inode we must clear the inode first 9601 * followed by the superblock. 9602 */ 9603 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9604 pino = 0; 9605 if (idp && (idp->id_state & UNLINKNEXT)) 9606 pino = idp->id_ino; 9607 FREE_LOCK(ump); 9608 if (pino == 0) { 9609 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 9610 (int)fs->fs_sbsize, 0, 0, 0); 9611 } else { 9612 error = bread(ump->um_devvp, 9613 fsbtodb(fs, ino_to_fsba(fs, pino)), 9614 (int)fs->fs_bsize, NOCRED, &bp); 9615 if (error) 9616 brelse(bp); 9617 } 9618 ACQUIRE_LOCK(ump); 9619 if (error) 9620 break; 9621 /* If the list has changed restart the loop. */ 9622 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9623 nino = 0; 9624 if (idp && (idp->id_state & UNLINKNEXT)) 9625 nino = idp->id_ino; 9626 if (nino != pino || 9627 (inodedep->id_state & UNLINKPREV) != UNLINKPREV) { 9628 FREE_LOCK(ump); 9629 brelse(bp); 9630 ACQUIRE_LOCK(ump); 9631 continue; 9632 } 9633 nino = 0; 9634 idn = TAILQ_NEXT(inodedep, id_unlinked); 9635 if (idn) 9636 nino = idn->id_ino; 9637 /* 9638 * Remove us from the in memory list. After this we cannot 9639 * access the inodedep. 9640 */ 9641 KASSERT((inodedep->id_state & UNLINKED) != 0, 9642 ("clear_unlinked_inodedep: inodedep %p not unlinked", 9643 inodedep)); 9644 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); 9645 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); 9646 FREE_LOCK(ump); 9647 /* 9648 * The predecessor's next pointer is manually updated here 9649 * so that the NEXT flag is never cleared for an element 9650 * that is in the list. 
9651 */ 9652 if (pino == 0) { 9653 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 9654 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 9655 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, 9656 bp); 9657 } else if (fs->fs_magic == FS_UFS1_MAGIC) 9658 ((struct ufs1_dinode *)bp->b_data + 9659 ino_to_fsbo(fs, pino))->di_freelink = nino; 9660 else 9661 ((struct ufs2_dinode *)bp->b_data + 9662 ino_to_fsbo(fs, pino))->di_freelink = nino; 9663 /* 9664 * If the bwrite fails we have no recourse to recover. The 9665 * filesystem is corrupted already. 9666 */ 9667 bwrite(bp); 9668 ACQUIRE_LOCK(ump); 9669 /* 9670 * If the superblock pointer still needs to be cleared force 9671 * a write here. 9672 */ 9673 if (fs->fs_sujfree == ino) { 9674 FREE_LOCK(ump); 9675 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 9676 (int)fs->fs_sbsize, 0, 0, 0); 9677 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 9678 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 9679 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, 9680 bp); 9681 bwrite(bp); 9682 ACQUIRE_LOCK(ump); 9683 } 9684 9685 if (fs->fs_sujfree != ino) 9686 return; 9687 panic("clear_unlinked_inodedep: Failed to clear free head"); 9688 } 9689 if (inodedep->id_ino == fs->fs_sujfree) 9690 panic("clear_unlinked_inodedep: Freeing head of free list"); 9691 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); 9692 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); 9693 return; 9694} 9695 9696/* 9697 * This workitem decrements the inode's link count. 9698 * If the link count reaches zero, the file is removed. 9699 */ 9700static int 9701handle_workitem_remove(dirrem, flags) 9702 struct dirrem *dirrem; 9703 int flags; 9704{ 9705 struct inodedep *inodedep; 9706 struct workhead dotdotwk; 9707 struct worklist *wk; 9708 struct ufsmount *ump; 9709 struct mount *mp; 9710 struct vnode *vp; 9711 struct inode *ip; 9712 ino_t oldinum; 9713 9714 if (dirrem->dm_state & ONWORKLIST) 9715 panic("handle_workitem_remove: dirrem %p still on worklist", 9716 dirrem); 9717 oldinum = dirrem->dm_oldinum; 9718 mp = dirrem->dm_list.wk_mp; 9719 ump = VFSTOUFS(mp); 9720 flags |= LK_EXCLUSIVE; 9721 if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0) 9722 return (EBUSY); 9723 ip = VTOI(vp); 9724 ACQUIRE_LOCK(ump); 9725 if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0) 9726 panic("handle_workitem_remove: lost inodedep"); 9727 if (dirrem->dm_state & ONDEPLIST) 9728 LIST_REMOVE(dirrem, dm_inonext); 9729 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 9730 ("handle_workitem_remove: Journal entries not written.")); 9731 9732 /* 9733 * Move all dependencies waiting on the remove to complete 9734 * from the dirrem to the inode inowait list to be completed 9735 * after the inode has been updated and written to disk. Any 9736 * marked MKDIR_PARENT are saved to be completed when the .. ref 9737 * is removed. 9738 */ 9739 LIST_INIT(&dotdotwk); 9740 while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) { 9741 WORKLIST_REMOVE(wk); 9742 if (wk->wk_state & MKDIR_PARENT) { 9743 wk->wk_state &= ~MKDIR_PARENT; 9744 WORKLIST_INSERT(&dotdotwk, wk); 9745 continue; 9746 } 9747 WORKLIST_INSERT(&inodedep->id_inowait, wk); 9748 } 9749 LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list); 9750 /* 9751 * Normal file deletion. 
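 *
 * Note that it is i_nlink that is adjusted below; the caller dropped
 * i_effnlink back when the name went away, and id_nlinkdelta records
 * the remaining gap between the two counters until the inode block
 * is written.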
 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		DIP_SET(ip, i_nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		if (ip->i_nlink < ip->i_effnlink)
			panic("handle_workitem_remove: bad file delta");
		if (ip->i_nlink == 0)
			unlinked_inodedep(mp, inodedep);
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: worklist not empty. %s",
		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	/*
	 * Directory deletion. Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Arrange to have the reference count on the parent decremented
	 * to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	DIP_SET(ip, i_nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	if (ip->i_nlink < ip->i_effnlink)
		panic("handle_workitem_remove: bad dir delta");
	if (ip->i_nlink == 0)
		unlinked_inodedep(mp, inodedep);
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	/*
	 * Rename a directory to a new parent. Since we are both deleting
	 * and creating a new directory entry, the link count on the new
	 * directory should not change. Thus we skip the followup dirrem.
	 */
	if (dirrem->dm_state & DIRCHG) {
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	dirrem->dm_state = ONDEPLIST;
	dirrem->dm_oldinum = dirrem->dm_dirinum;
	/*
	 * Place the dirrem on the parent's dirremhd list.
	 */
	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
		panic("handle_workitem_remove: lost dir inodedep");
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
	/*
	 * If the allocated inode has never been written to disk, then
	 * the on-disk inode is zero'ed and we can remove the file
	 * immediately. When journaling, if the inode has been marked
	 * unlinked and not DEPCOMPLETE we know it can never be written.
	 */
	inodedep_lookup(mp, oldinum, 0, &inodedep);
	if (inodedep == NULL ||
	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
	    check_inode_unwritten(inodedep)) {
		FREE_LOCK(ump);
		vput(vp);
		return (handle_workitem_remove(dirrem, flags));
	}
	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
	FREE_LOCK(ump);
	ip->i_flag |= IN_CHANGE;
out:
	ffs_update(vp, 0);
	vput(vp);
	return (0);
}

/*
 * Inode de-allocation dependencies.
 *
 * When an inode's link count is reduced to zero, it can be de-allocated. We
 * found it convenient to postpone de-allocation until after the inode is
 * written to disk with its new link count (zero). At this point, all of the
 * on-disk inode's block pointers are nullified and, with careful dependency
 * list ordering, all dependencies related to the inode will be satisfied and
 * the corresponding dependency structures de-allocated. So, if/when the
 * inode is reused, there will be no mixing of old dependencies with new
 * ones. This artificial dependency is set up by the block de-allocation
 * procedure above (softdep_setup_freeblocks) and completed by the
 * following procedure.
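 *
 * As an illustrative timeline (not code from this file):
 *
 *	unlink() drops the last name; i_effnlink reaches 0
 *	inode block written with di_nlink == 0
 *	handle_workitem_freefile() -> ffs_freefile() clears the bitmap
 *
 * Only after the zero-link inode is on stable storage is the inode
 * number returned to the allocator, so no committed directory entry
 * can ever point at a reallocated inode.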
9839 */ 9840static void 9841handle_workitem_freefile(freefile) 9842 struct freefile *freefile; 9843{ 9844 struct workhead wkhd; 9845 struct fs *fs; 9846 struct inodedep *idp; 9847 struct ufsmount *ump; 9848 int error; 9849 9850 ump = VFSTOUFS(freefile->fx_list.wk_mp); 9851 fs = ump->um_fs; 9852#ifdef DEBUG 9853 ACQUIRE_LOCK(ump); 9854 error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp); 9855 FREE_LOCK(ump); 9856 if (error) 9857 panic("handle_workitem_freefile: inodedep %p survived", idp); 9858#endif 9859 UFS_LOCK(ump); 9860 fs->fs_pendinginodes -= 1; 9861 UFS_UNLOCK(ump); 9862 LIST_INIT(&wkhd); 9863 LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list); 9864 if ((error = ffs_freefile(ump, fs, freefile->fx_devvp, 9865 freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0) 9866 softdep_error("handle_workitem_freefile", error); 9867 ACQUIRE_LOCK(ump); 9868 WORKITEM_FREE(freefile, D_FREEFILE); 9869 FREE_LOCK(ump); 9870} 9871 9872 9873/* 9874 * Helper function which unlinks marker element from work list and returns 9875 * the next element on the list. 9876 */ 9877static __inline struct worklist * 9878markernext(struct worklist *marker) 9879{ 9880 struct worklist *next; 9881 9882 next = LIST_NEXT(marker, wk_list); 9883 LIST_REMOVE(marker, wk_list); 9884 return next; 9885} 9886 9887/* 9888 * Disk writes. 9889 * 9890 * The dependency structures constructed above are most actively used when file 9891 * system blocks are written to disk. No constraints are placed on when a 9892 * block can be written, but unsatisfied update dependencies are made safe by 9893 * modifying (or replacing) the source memory for the duration of the disk 9894 * write. When the disk write completes, the memory block is again brought 9895 * up-to-date. 9896 * 9897 * In-core inode structure reclamation. 9898 * 9899 * Because there are a finite number of "in-core" inode structures, they are 9900 * reused regularly. By transferring all inode-related dependencies to the 9901 * in-memory inode block and indexing them separately (via "inodedep"s), we 9902 * can allow "in-core" inode structures to be reused at any time and avoid 9903 * any increase in contention. 9904 * 9905 * Called just before entering the device driver to initiate a new disk I/O. 9906 * The buffer must be locked, thus, no I/O completion operations can occur 9907 * while we are manipulating its associated dependencies. 9908 */ 9909static void 9910softdep_disk_io_initiation(bp) 9911 struct buf *bp; /* structure describing disk write to occur */ 9912{ 9913 struct worklist *wk; 9914 struct worklist marker; 9915 struct inodedep *inodedep; 9916 struct freeblks *freeblks; 9917 struct jblkdep *jblkdep; 9918 struct newblk *newblk; 9919 struct ufsmount *ump; 9920 9921 /* 9922 * We only care about write operations. There should never 9923 * be dependencies for reads. 9924 */ 9925 if (bp->b_iocmd != BIO_WRITE) 9926 panic("softdep_disk_io_initiation: not write"); 9927 9928 if (bp->b_vflags & BV_BKGRDINPROG) 9929 panic("softdep_disk_io_initiation: Writing buffer with " 9930 "background write in progress: %p", bp); 9931 9932 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 9933 return; 9934 ump = VFSTOUFS(wk->wk_mp); 9935 9936 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 9937 PHOLD(curproc); /* Don't swap out kernel stack */ 9938 ACQUIRE_LOCK(ump); 9939 /* 9940 * Do any necessary pre-I/O processing. 
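 *
 * The traversal below uses "marker" as a restartable cursor: it is
 * linked just after the current item before that item is handled, so
 * if a handler sleeps in jwait() and b_dep changes, markernext()
 * still yields a valid continuation point.  In outline:
 *
 *	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
 *	    wk = markernext(&marker)) {
 *		LIST_INSERT_AFTER(wk, &marker, wk_list);
 *		(dispatch on wk->wk_type, possibly sleeping)
 *	}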
9941 */ 9942 for (wk = LIST_FIRST(&bp->b_dep); wk != NULL; 9943 wk = markernext(&marker)) { 9944 LIST_INSERT_AFTER(wk, &marker, wk_list); 9945 switch (wk->wk_type) { 9946 9947 case D_PAGEDEP: 9948 initiate_write_filepage(WK_PAGEDEP(wk), bp); 9949 continue; 9950 9951 case D_INODEDEP: 9952 inodedep = WK_INODEDEP(wk); 9953 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) 9954 initiate_write_inodeblock_ufs1(inodedep, bp); 9955 else 9956 initiate_write_inodeblock_ufs2(inodedep, bp); 9957 continue; 9958 9959 case D_INDIRDEP: 9960 initiate_write_indirdep(WK_INDIRDEP(wk), bp); 9961 continue; 9962 9963 case D_BMSAFEMAP: 9964 initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp); 9965 continue; 9966 9967 case D_JSEG: 9968 WK_JSEG(wk)->js_buf = NULL; 9969 continue; 9970 9971 case D_FREEBLKS: 9972 freeblks = WK_FREEBLKS(wk); 9973 jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd); 9974 /* 9975 * We have to wait for the freeblks to be journaled 9976 * before we can write an inodeblock with updated 9977 * pointers. Be careful to arrange the marker so 9978 * we revisit the freeblks if it's not removed by 9979 * the first jwait(). 9980 */ 9981 if (jblkdep != NULL) { 9982 LIST_REMOVE(&marker, wk_list); 9983 LIST_INSERT_BEFORE(wk, &marker, wk_list); 9984 jwait(&jblkdep->jb_list, MNT_WAIT); 9985 } 9986 continue; 9987 case D_ALLOCDIRECT: 9988 case D_ALLOCINDIR: 9989 /* 9990 * We have to wait for the jnewblk to be journaled 9991 * before we can write to a block if the contents 9992 * may be confused with an earlier file's indirect 9993 * at recovery time. Handle the marker as described 9994 * above. 9995 */ 9996 newblk = WK_NEWBLK(wk); 9997 if (newblk->nb_jnewblk != NULL && 9998 indirblk_lookup(newblk->nb_list.wk_mp, 9999 newblk->nb_newblkno)) { 10000 LIST_REMOVE(&marker, wk_list); 10001 LIST_INSERT_BEFORE(wk, &marker, wk_list); 10002 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 10003 } 10004 continue; 10005 10006 case D_SBDEP: 10007 initiate_write_sbdep(WK_SBDEP(wk)); 10008 continue; 10009 10010 case D_MKDIR: 10011 case D_FREEWORK: 10012 case D_FREEDEP: 10013 case D_JSEGDEP: 10014 continue; 10015 10016 default: 10017 panic("handle_disk_io_initiation: Unexpected type %s", 10018 TYPENAME(wk->wk_type)); 10019 /* NOTREACHED */ 10020 } 10021 } 10022 FREE_LOCK(ump); 10023 PRELE(curproc); /* Allow swapout of kernel stack */ 10024} 10025 10026/* 10027 * Called from within the procedure above to deal with unsatisfied 10028 * allocation dependencies in a directory. The buffer must be locked, 10029 * thus, no I/O completion operations can occur while we are 10030 * manipulating its associated dependencies. 10031 */ 10032static void 10033initiate_write_filepage(pagedep, bp) 10034 struct pagedep *pagedep; 10035 struct buf *bp; 10036{ 10037 struct jremref *jremref; 10038 struct jmvref *jmvref; 10039 struct dirrem *dirrem; 10040 struct diradd *dap; 10041 struct direct *ep; 10042 int i; 10043 10044 if (pagedep->pd_state & IOSTARTED) { 10045 /* 10046 * This can only happen if there is a driver that does not 10047 * understand chaining. Here biodone will reissue the call 10048 * to strategy for the incomplete buffers. 10049 */ 10050 printf("initiate_write_filepage: already started\n"); 10051 return; 10052 } 10053 pagedep->pd_state |= IOSTARTED; 10054 /* 10055 * Wait for all journal remove dependencies to hit the disk. 10056 * We can not allow any potentially conflicting directory adds 10057 * to be visible before removes and rollback is too difficult. 
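 *
 * Once the journal waits are satisfied, each pending diradd in this
 * page is rolled back in place below: d_ino reverts to the previous
 * inode number for a DIRCHG entry and to zero for a plain add, and
 * the diradd flips from ATTACHED to UNDONE until the write is done.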
10058 * The per-filesystem lock may be dropped and re-acquired, however 10059 * we hold the buf locked so the dependency can not go away. 10060 */ 10061 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) 10062 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) 10063 jwait(&jremref->jr_list, MNT_WAIT); 10064 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) 10065 jwait(&jmvref->jm_list, MNT_WAIT); 10066 for (i = 0; i < DAHASHSZ; i++) { 10067 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 10068 ep = (struct direct *) 10069 ((char *)bp->b_data + dap->da_offset); 10070 if (ep->d_ino != dap->da_newinum) 10071 panic("%s: dir inum %ju != new %ju", 10072 "initiate_write_filepage", 10073 (uintmax_t)ep->d_ino, 10074 (uintmax_t)dap->da_newinum); 10075 if (dap->da_state & DIRCHG) 10076 ep->d_ino = dap->da_previous->dm_oldinum; 10077 else 10078 ep->d_ino = 0; 10079 dap->da_state &= ~ATTACHED; 10080 dap->da_state |= UNDONE; 10081 } 10082 } 10083} 10084 10085/* 10086 * Version of initiate_write_inodeblock that handles UFS1 dinodes. 10087 * Note that any bug fixes made to this routine must be done in the 10088 * version found below. 10089 * 10090 * Called from within the procedure above to deal with unsatisfied 10091 * allocation dependencies in an inodeblock. The buffer must be 10092 * locked, thus, no I/O completion operations can occur while we 10093 * are manipulating its associated dependencies. 10094 */ 10095static void 10096initiate_write_inodeblock_ufs1(inodedep, bp) 10097 struct inodedep *inodedep; 10098 struct buf *bp; /* The inode block */ 10099{ 10100 struct allocdirect *adp, *lastadp; 10101 struct ufs1_dinode *dp; 10102 struct ufs1_dinode *sip; 10103 struct inoref *inoref; 10104 struct ufsmount *ump; 10105 struct fs *fs; 10106 ufs_lbn_t i; 10107#ifdef INVARIANTS 10108 ufs_lbn_t prevlbn = 0; 10109#endif 10110 int deplist; 10111 10112 if (inodedep->id_state & IOSTARTED) 10113 panic("initiate_write_inodeblock_ufs1: already started"); 10114 inodedep->id_state |= IOSTARTED; 10115 fs = inodedep->id_fs; 10116 ump = VFSTOUFS(inodedep->id_list.wk_mp); 10117 LOCK_OWNED(ump); 10118 dp = (struct ufs1_dinode *)bp->b_data + 10119 ino_to_fsbo(fs, inodedep->id_ino); 10120 10121 /* 10122 * If we're on the unlinked list but have not yet written our 10123 * next pointer initialize it here. 10124 */ 10125 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { 10126 struct inodedep *inon; 10127 10128 inon = TAILQ_NEXT(inodedep, id_unlinked); 10129 dp->di_freelink = inon ? inon->id_ino : 0; 10130 } 10131 /* 10132 * If the bitmap is not yet written, then the allocated 10133 * inode cannot be written to disk. 10134 */ 10135 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 10136 if (inodedep->id_savedino1 != NULL) 10137 panic("initiate_write_inodeblock_ufs1: I/O underway"); 10138 FREE_LOCK(ump); 10139 sip = malloc(sizeof(struct ufs1_dinode), 10140 M_SAVEDINO, M_SOFTDEP_FLAGS); 10141 ACQUIRE_LOCK(ump); 10142 inodedep->id_savedino1 = sip; 10143 *inodedep->id_savedino1 = *dp; 10144 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 10145 dp->di_gen = inodedep->id_savedino1->di_gen; 10146 dp->di_freelink = inodedep->id_savedino1->di_freelink; 10147 return; 10148 } 10149 /* 10150 * If no dependencies, then there is nothing to roll back. 
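 *
 * The id_savedino1 handling above is the usual soft updates rollback
 * pattern: stash the up-to-date dinode, send a zeroed copy (keeping
 * only di_gen and di_freelink) to the disk, and let the write
 * completion side restore the saved copy once the I/O finishes.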
10151 */ 10152 inodedep->id_savedsize = dp->di_size; 10153 inodedep->id_savedextsize = 0; 10154 inodedep->id_savednlink = dp->di_nlink; 10155 if (TAILQ_EMPTY(&inodedep->id_inoupdt) && 10156 TAILQ_EMPTY(&inodedep->id_inoreflst)) 10157 return; 10158 /* 10159 * Revert the link count to that of the first unwritten journal entry. 10160 */ 10161 inoref = TAILQ_FIRST(&inodedep->id_inoreflst); 10162 if (inoref) 10163 dp->di_nlink = inoref->if_nlink; 10164 /* 10165 * Set the dependencies to busy. 10166 */ 10167 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10168 adp = TAILQ_NEXT(adp, ad_next)) { 10169#ifdef INVARIANTS 10170 if (deplist != 0 && prevlbn >= adp->ad_offset) 10171 panic("softdep_write_inodeblock: lbn order"); 10172 prevlbn = adp->ad_offset; 10173 if (adp->ad_offset < NDADDR && 10174 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 10175 panic("%s: direct pointer #%jd mismatch %d != %jd", 10176 "softdep_write_inodeblock", 10177 (intmax_t)adp->ad_offset, 10178 dp->di_db[adp->ad_offset], 10179 (intmax_t)adp->ad_newblkno); 10180 if (adp->ad_offset >= NDADDR && 10181 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno) 10182 panic("%s: indirect pointer #%jd mismatch %d != %jd", 10183 "softdep_write_inodeblock", 10184 (intmax_t)adp->ad_offset - NDADDR, 10185 dp->di_ib[adp->ad_offset - NDADDR], 10186 (intmax_t)adp->ad_newblkno); 10187 deplist |= 1 << adp->ad_offset; 10188 if ((adp->ad_state & ATTACHED) == 0) 10189 panic("softdep_write_inodeblock: Unknown state 0x%x", 10190 adp->ad_state); 10191#endif /* INVARIANTS */ 10192 adp->ad_state &= ~ATTACHED; 10193 adp->ad_state |= UNDONE; 10194 } 10195 /* 10196 * The on-disk inode cannot claim to be any larger than the last 10197 * fragment that has been written. Otherwise, the on-disk inode 10198 * might have fragments that were not the last block in the file 10199 * which would corrupt the filesystem. 10200 */ 10201 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10202 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10203 if (adp->ad_offset >= NDADDR) 10204 break; 10205 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 10206 /* keep going until hitting a rollback to a frag */ 10207 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10208 continue; 10209 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10210 for (i = adp->ad_offset + 1; i < NDADDR; i++) { 10211#ifdef INVARIANTS 10212 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 10213 panic("softdep_write_inodeblock: lost dep1"); 10214#endif /* INVARIANTS */ 10215 dp->di_db[i] = 0; 10216 } 10217 for (i = 0; i < NIADDR; i++) { 10218#ifdef INVARIANTS 10219 if (dp->di_ib[i] != 0 && 10220 (deplist & ((1 << NDADDR) << i)) == 0) 10221 panic("softdep_write_inodeblock: lost dep2"); 10222#endif /* INVARIANTS */ 10223 dp->di_ib[i] = 0; 10224 } 10225 return; 10226 } 10227 /* 10228 * If we have zero'ed out the last allocated block of the file, 10229 * roll back the size to the last currently allocated block. 10230 * We know that this last allocated block is a full-sized as 10231 * we already checked for fragments in the loop above. 10232 */ 10233 if (lastadp != NULL && 10234 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10235 for (i = lastadp->ad_offset; i >= 0; i--) 10236 if (dp->di_db[i] != 0) 10237 break; 10238 dp->di_size = (i + 1) * fs->fs_bsize; 10239 } 10240 /* 10241 * The only dependencies are for indirect blocks. 10242 * 10243 * The file size for indirect block additions is not guaranteed. 
10244 * Such a guarantee would be non-trivial to achieve. The conventional 10245 * synchronous write implementation also does not make this guarantee. 10246 * Fsck should catch and fix discrepancies. Arguably, the file size 10247 * can be over-estimated without destroying integrity when the file 10248 * moves into the indirect blocks (i.e., is large). If we want to 10249 * postpone fsck, we are stuck with this argument. 10250 */ 10251 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 10252 dp->di_ib[adp->ad_offset - NDADDR] = 0; 10253} 10254 10255/* 10256 * Version of initiate_write_inodeblock that handles UFS2 dinodes. 10257 * Note that any bug fixes made to this routine must be done in the 10258 * version found above. 10259 * 10260 * Called from within the procedure above to deal with unsatisfied 10261 * allocation dependencies in an inodeblock. The buffer must be 10262 * locked, thus, no I/O completion operations can occur while we 10263 * are manipulating its associated dependencies. 10264 */ 10265static void 10266initiate_write_inodeblock_ufs2(inodedep, bp) 10267 struct inodedep *inodedep; 10268 struct buf *bp; /* The inode block */ 10269{ 10270 struct allocdirect *adp, *lastadp; 10271 struct ufs2_dinode *dp; 10272 struct ufs2_dinode *sip; 10273 struct inoref *inoref; 10274 struct ufsmount *ump; 10275 struct fs *fs; 10276 ufs_lbn_t i; 10277#ifdef INVARIANTS 10278 ufs_lbn_t prevlbn = 0; 10279#endif 10280 int deplist; 10281 10282 if (inodedep->id_state & IOSTARTED) 10283 panic("initiate_write_inodeblock_ufs2: already started"); 10284 inodedep->id_state |= IOSTARTED; 10285 fs = inodedep->id_fs; 10286 ump = VFSTOUFS(inodedep->id_list.wk_mp); 10287 LOCK_OWNED(ump); 10288 dp = (struct ufs2_dinode *)bp->b_data + 10289 ino_to_fsbo(fs, inodedep->id_ino); 10290 10291 /* 10292 * If we're on the unlinked list but have not yet written our 10293 * next pointer initialize it here. 10294 */ 10295 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { 10296 struct inodedep *inon; 10297 10298 inon = TAILQ_NEXT(inodedep, id_unlinked); 10299 dp->di_freelink = inon ? inon->id_ino : 0; 10300 } 10301 /* 10302 * If the bitmap is not yet written, then the allocated 10303 * inode cannot be written to disk. 10304 */ 10305 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 10306 if (inodedep->id_savedino2 != NULL) 10307 panic("initiate_write_inodeblock_ufs2: I/O underway"); 10308 FREE_LOCK(ump); 10309 sip = malloc(sizeof(struct ufs2_dinode), 10310 M_SAVEDINO, M_SOFTDEP_FLAGS); 10311 ACQUIRE_LOCK(ump); 10312 inodedep->id_savedino2 = sip; 10313 *inodedep->id_savedino2 = *dp; 10314 bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); 10315 dp->di_gen = inodedep->id_savedino2->di_gen; 10316 dp->di_freelink = inodedep->id_savedino2->di_freelink; 10317 return; 10318 } 10319 /* 10320 * If no dependencies, then there is nothing to roll back. 10321 */ 10322 inodedep->id_savedsize = dp->di_size; 10323 inodedep->id_savedextsize = dp->di_extsize; 10324 inodedep->id_savednlink = dp->di_nlink; 10325 if (TAILQ_EMPTY(&inodedep->id_inoupdt) && 10326 TAILQ_EMPTY(&inodedep->id_extupdt) && 10327 TAILQ_EMPTY(&inodedep->id_inoreflst)) 10328 return; 10329 /* 10330 * Revert the link count to that of the first unwritten journal entry. 10331 */ 10332 inoref = TAILQ_FIRST(&inodedep->id_inoreflst); 10333 if (inoref) 10334 dp->di_nlink = inoref->if_nlink; 10335 10336 /* 10337 * Set the ext data dependencies to busy. 
10338 */ 10339 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 10340 adp = TAILQ_NEXT(adp, ad_next)) { 10341#ifdef INVARIANTS 10342 if (deplist != 0 && prevlbn >= adp->ad_offset) 10343 panic("softdep_write_inodeblock: lbn order"); 10344 prevlbn = adp->ad_offset; 10345 if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno) 10346 panic("%s: direct pointer #%jd mismatch %jd != %jd", 10347 "softdep_write_inodeblock", 10348 (intmax_t)adp->ad_offset, 10349 (intmax_t)dp->di_extb[adp->ad_offset], 10350 (intmax_t)adp->ad_newblkno); 10351 deplist |= 1 << adp->ad_offset; 10352 if ((adp->ad_state & ATTACHED) == 0) 10353 panic("softdep_write_inodeblock: Unknown state 0x%x", 10354 adp->ad_state); 10355#endif /* INVARIANTS */ 10356 adp->ad_state &= ~ATTACHED; 10357 adp->ad_state |= UNDONE; 10358 } 10359 /* 10360 * The on-disk inode cannot claim to be any larger than the last 10361 * fragment that has been written. Otherwise, the on-disk inode 10362 * might have fragments that were not the last block in the ext 10363 * data which would corrupt the filesystem. 10364 */ 10365 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 10366 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10367 dp->di_extb[adp->ad_offset] = adp->ad_oldblkno; 10368 /* keep going until hitting a rollback to a frag */ 10369 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10370 continue; 10371 dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10372 for (i = adp->ad_offset + 1; i < NXADDR; i++) { 10373#ifdef INVARIANTS 10374 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) 10375 panic("softdep_write_inodeblock: lost dep1"); 10376#endif /* INVARIANTS */ 10377 dp->di_extb[i] = 0; 10378 } 10379 lastadp = NULL; 10380 break; 10381 } 10382 /* 10383 * If we have zero'ed out the last allocated block of the ext 10384 * data, roll back the size to the last currently allocated block. 10385 * We know that this last allocated block is a full-sized as 10386 * we already checked for fragments in the loop above. 10387 */ 10388 if (lastadp != NULL && 10389 dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10390 for (i = lastadp->ad_offset; i >= 0; i--) 10391 if (dp->di_extb[i] != 0) 10392 break; 10393 dp->di_extsize = (i + 1) * fs->fs_bsize; 10394 } 10395 /* 10396 * Set the file data dependencies to busy. 
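 *
 * As with the ext data above, "deplist" accumulates one bit per
 * rolled-back pointer so that the INVARIANTS checks further down can
 * tell pointers we zeroed apart from pointers that somehow lost
 * their dependency tracking.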
10397 */ 10398 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10399 adp = TAILQ_NEXT(adp, ad_next)) { 10400#ifdef INVARIANTS 10401 if (deplist != 0 && prevlbn >= adp->ad_offset) 10402 panic("softdep_write_inodeblock: lbn order"); 10403 if ((adp->ad_state & ATTACHED) == 0) 10404 panic("inodedep %p and adp %p not attached", inodedep, adp); 10405 prevlbn = adp->ad_offset; 10406 if (adp->ad_offset < NDADDR && 10407 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 10408 panic("%s: direct pointer #%jd mismatch %jd != %jd", 10409 "softdep_write_inodeblock", 10410 (intmax_t)adp->ad_offset, 10411 (intmax_t)dp->di_db[adp->ad_offset], 10412 (intmax_t)adp->ad_newblkno); 10413 if (adp->ad_offset >= NDADDR && 10414 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno) 10415 panic("%s indirect pointer #%jd mismatch %jd != %jd", 10416 "softdep_write_inodeblock:", 10417 (intmax_t)adp->ad_offset - NDADDR, 10418 (intmax_t)dp->di_ib[adp->ad_offset - NDADDR], 10419 (intmax_t)adp->ad_newblkno); 10420 deplist |= 1 << adp->ad_offset; 10421 if ((adp->ad_state & ATTACHED) == 0) 10422 panic("softdep_write_inodeblock: Unknown state 0x%x", 10423 adp->ad_state); 10424#endif /* INVARIANTS */ 10425 adp->ad_state &= ~ATTACHED; 10426 adp->ad_state |= UNDONE; 10427 } 10428 /* 10429 * The on-disk inode cannot claim to be any larger than the last 10430 * fragment that has been written. Otherwise, the on-disk inode 10431 * might have fragments that were not the last block in the file 10432 * which would corrupt the filesystem. 10433 */ 10434 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10435 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10436 if (adp->ad_offset >= NDADDR) 10437 break; 10438 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 10439 /* keep going until hitting a rollback to a frag */ 10440 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10441 continue; 10442 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10443 for (i = adp->ad_offset + 1; i < NDADDR; i++) { 10444#ifdef INVARIANTS 10445 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 10446 panic("softdep_write_inodeblock: lost dep2"); 10447#endif /* INVARIANTS */ 10448 dp->di_db[i] = 0; 10449 } 10450 for (i = 0; i < NIADDR; i++) { 10451#ifdef INVARIANTS 10452 if (dp->di_ib[i] != 0 && 10453 (deplist & ((1 << NDADDR) << i)) == 0) 10454 panic("softdep_write_inodeblock: lost dep3"); 10455#endif /* INVARIANTS */ 10456 dp->di_ib[i] = 0; 10457 } 10458 return; 10459 } 10460 /* 10461 * If we have zero'ed out the last allocated block of the file, 10462 * roll back the size to the last currently allocated block. 10463 * We know that this last allocated block is a full-sized as 10464 * we already checked for fragments in the loop above. 10465 */ 10466 if (lastadp != NULL && 10467 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10468 for (i = lastadp->ad_offset; i >= 0; i--) 10469 if (dp->di_db[i] != 0) 10470 break; 10471 dp->di_size = (i + 1) * fs->fs_bsize; 10472 } 10473 /* 10474 * The only dependencies are for indirect blocks. 10475 * 10476 * The file size for indirect block additions is not guaranteed. 10477 * Such a guarantee would be non-trivial to achieve. The conventional 10478 * synchronous write implementation also does not make this guarantee. 10479 * Fsck should catch and fix discrepancies. Arguably, the file size 10480 * can be over-estimated without destroying integrity when the file 10481 * moves into the indirect blocks (i.e., is large). 
If we want to
 * postpone fsck, we are stuck with this argument.
 */
	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
		dp->di_ib[adp->ad_offset - NDADDR] = 0;
}

/*
 * Cancel an indirdep as a result of truncation. Release all of the
 * children allocindirs and place their journal work on the appropriate
 * list.
 */
static void
cancel_indirdep(indirdep, bp, freeblks)
	struct indirdep *indirdep;
	struct buf *bp;
	struct freeblks *freeblks;
{
	struct allocindir *aip;

	/*
	 * None of the indirect pointers will ever be visible,
	 * so they can simply be tossed. GOINGAWAY ensures
	 * that allocated pointers will be saved in the buffer
	 * cache until they are freed. Note that they will
	 * only be able to be found by their physical address
	 * since the inode mapping the logical address will
	 * be gone. The save buffer used for the safe copy
	 * was allocated in setup_allocindir_phase2 using
	 * the physical address so it could be used for this
	 * purpose. Hence we swap the safe copy with the real
	 * copy, allowing the safe copy to be freed and holding
	 * on to the real copy for later use in indir_trunc.
	 */
	if (indirdep->ir_state & GOINGAWAY)
		panic("cancel_indirdep: already gone");
	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
		indirdep->ir_state |= DEPCOMPLETE;
		LIST_REMOVE(indirdep, ir_next);
	}
	indirdep->ir_state |= GOINGAWAY;
	/*
	 * Pass in bp for blocks that still have journal writes
	 * pending so we can cancel them on their own.
	 */
	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
		cancel_allocindir(aip, bp, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	/*
	 * If there are pending partial truncations we need to keep the
	 * old block copy around until they complete. This is because
	 * the current b_data is not a perfect superset of the available
	 * blocks.
	 */
	if (TAILQ_EMPTY(&indirdep->ir_trunc))
		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
	else
		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
	WORKLIST_REMOVE(&indirdep->ir_list);
	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
	indirdep->ir_bp = NULL;
	indirdep->ir_freeblks = freeblks;
}

/*
 * Free an indirdep once it no longer has new pointers to track.
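 *
 * The swap performed by cancel_indirdep() above can be pictured as:
 *
 *	before:	bp        - up-to-date pointers (found by logical addr)
 *		ir_savebp - safe copy (found by physical addr)
 *	after:	ir_savebp - holds the real copy plus the worklist,
 *		            for later use by indir_trunc()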
10552 */ 10553static void 10554free_indirdep(indirdep) 10555 struct indirdep *indirdep; 10556{ 10557 10558 KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc), 10559 ("free_indirdep: Indir trunc list not empty.")); 10560 KASSERT(LIST_EMPTY(&indirdep->ir_completehd), 10561 ("free_indirdep: Complete head not empty.")); 10562 KASSERT(LIST_EMPTY(&indirdep->ir_writehd), 10563 ("free_indirdep: write head not empty.")); 10564 KASSERT(LIST_EMPTY(&indirdep->ir_donehd), 10565 ("free_indirdep: done head not empty.")); 10566 KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd), 10567 ("free_indirdep: deplist head not empty.")); 10568 KASSERT((indirdep->ir_state & DEPCOMPLETE), 10569 ("free_indirdep: %p still on newblk list.", indirdep)); 10570 KASSERT(indirdep->ir_saveddata == NULL, 10571 ("free_indirdep: %p still has saved data.", indirdep)); 10572 if (indirdep->ir_state & ONWORKLIST) 10573 WORKLIST_REMOVE(&indirdep->ir_list); 10574 WORKITEM_FREE(indirdep, D_INDIRDEP); 10575} 10576 10577/* 10578 * Called before a write to an indirdep. This routine is responsible for 10579 * rolling back pointers to a safe state which includes only those 10580 * allocindirs which have been completed. 10581 */ 10582static void 10583initiate_write_indirdep(indirdep, bp) 10584 struct indirdep *indirdep; 10585 struct buf *bp; 10586{ 10587 struct ufsmount *ump; 10588 10589 indirdep->ir_state |= IOSTARTED; 10590 if (indirdep->ir_state & GOINGAWAY) 10591 panic("disk_io_initiation: indirdep gone"); 10592 /* 10593 * If there are no remaining dependencies, this will be writing 10594 * the real pointers. 10595 */ 10596 if (LIST_EMPTY(&indirdep->ir_deplisthd) && 10597 TAILQ_EMPTY(&indirdep->ir_trunc)) 10598 return; 10599 /* 10600 * Replace up-to-date version with safe version. 10601 */ 10602 if (indirdep->ir_saveddata == NULL) { 10603 ump = VFSTOUFS(indirdep->ir_list.wk_mp); 10604 LOCK_OWNED(ump); 10605 FREE_LOCK(ump); 10606 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 10607 M_SOFTDEP_FLAGS); 10608 ACQUIRE_LOCK(ump); 10609 } 10610 indirdep->ir_state &= ~ATTACHED; 10611 indirdep->ir_state |= UNDONE; 10612 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10613 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 10614 bp->b_bcount); 10615} 10616 10617/* 10618 * Called when an inode has been cleared in a cg bitmap. This finally 10619 * eliminates any canceled jaddrefs 10620 */ 10621void 10622softdep_setup_inofree(mp, bp, ino, wkhd) 10623 struct mount *mp; 10624 struct buf *bp; 10625 ino_t ino; 10626 struct workhead *wkhd; 10627{ 10628 struct worklist *wk, *wkn; 10629 struct inodedep *inodedep; 10630 struct ufsmount *ump; 10631 uint8_t *inosused; 10632 struct cg *cgp; 10633 struct fs *fs; 10634 10635 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 10636 ("softdep_setup_inofree called on non-softdep filesystem")); 10637 ump = VFSTOUFS(mp); 10638 ACQUIRE_LOCK(ump); 10639 fs = ump->um_fs; 10640 cgp = (struct cg *)bp->b_data; 10641 inosused = cg_inosused(cgp); 10642 if (isset(inosused, ino % fs->fs_ipg)) 10643 panic("softdep_setup_inofree: inode %ju not freed.", 10644 (uintmax_t)ino); 10645 if (inodedep_lookup(mp, ino, 0, &inodedep)) 10646 panic("softdep_setup_inofree: ino %ju has existing inodedep %p", 10647 (uintmax_t)ino, inodedep); 10648 if (wkhd) { 10649 LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) { 10650 if (wk->wk_type != D_JADDREF) 10651 continue; 10652 WORKLIST_REMOVE(wk); 10653 /* 10654 * We can free immediately even if the jaddref 10655 * isn't attached in a background write as now 10656 * the bitmaps are reconciled. 
10657 */ 10658 wk->wk_state |= COMPLETE | ATTACHED; 10659 free_jaddref(WK_JADDREF(wk)); 10660 } 10661 jwork_move(&bp->b_dep, wkhd); 10662 } 10663 FREE_LOCK(ump); 10664} 10665 10666 10667/* 10668 * Called via ffs_blkfree() after a set of frags has been cleared from a cg 10669 * map. Any dependencies waiting for the write to clear are added to the 10670 * buf's list and any jnewblks that are being canceled are discarded 10671 * immediately. 10672 */ 10673void 10674softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 10675 struct mount *mp; 10676 struct buf *bp; 10677 ufs2_daddr_t blkno; 10678 int frags; 10679 struct workhead *wkhd; 10680{ 10681 struct bmsafemap *bmsafemap; 10682 struct jnewblk *jnewblk; 10683 struct ufsmount *ump; 10684 struct worklist *wk; 10685 struct fs *fs; 10686#ifdef SUJ_DEBUG 10687 uint8_t *blksfree; 10688 struct cg *cgp; 10689 ufs2_daddr_t jstart; 10690 ufs2_daddr_t jend; 10691 ufs2_daddr_t end; 10692 long bno; 10693 int i; 10694#endif 10695 10696 CTR3(KTR_SUJ, 10697 "softdep_setup_blkfree: blkno %jd frags %d wk head %p", 10698 blkno, frags, wkhd); 10699 10700 ump = VFSTOUFS(mp); 10701 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 10702 ("softdep_setup_blkfree called on non-softdep filesystem")); 10703 ACQUIRE_LOCK(ump); 10704 /* Lookup the bmsafemap so we track when it is dirty. */ 10705 fs = ump->um_fs; 10706 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10707 /* 10708 * Detach any jnewblks which have been canceled. They must linger 10709 * until the bitmap is cleared again by ffs_blkfree() to prevent 10710 * an unjournaled allocation from hitting the disk. 10711 */ 10712 if (wkhd) { 10713 while ((wk = LIST_FIRST(wkhd)) != NULL) { 10714 CTR2(KTR_SUJ, 10715 "softdep_setup_blkfree: blkno %jd wk type %d", 10716 blkno, wk->wk_type); 10717 WORKLIST_REMOVE(wk); 10718 if (wk->wk_type != D_JNEWBLK) { 10719 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); 10720 continue; 10721 } 10722 jnewblk = WK_JNEWBLK(wk); 10723 KASSERT(jnewblk->jn_state & GOINGAWAY, 10724 ("softdep_setup_blkfree: jnewblk not canceled.")); 10725#ifdef SUJ_DEBUG 10726 /* 10727 * Assert that this block is free in the bitmap 10728 * before we discard the jnewblk. 10729 */ 10730 cgp = (struct cg *)bp->b_data; 10731 blksfree = cg_blksfree(cgp); 10732 bno = dtogd(fs, jnewblk->jn_blkno); 10733 for (i = jnewblk->jn_oldfrags; 10734 i < jnewblk->jn_frags; i++) { 10735 if (isset(blksfree, bno + i)) 10736 continue; 10737 panic("softdep_setup_blkfree: not free"); 10738 } 10739#endif 10740 /* 10741 * Even if it's not attached we can free immediately 10742 * as the new bitmap is correct. 10743 */ 10744 wk->wk_state |= COMPLETE | ATTACHED; 10745 free_jnewblk(jnewblk); 10746 } 10747 } 10748 10749#ifdef SUJ_DEBUG 10750 /* 10751 * Assert that we are not freeing a block which has an outstanding 10752 * allocation dependency. 10753 */ 10754 fs = VFSTOUFS(mp)->um_fs; 10755 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10756 end = blkno + frags; 10757 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10758 /* 10759 * Don't match against blocks that will be freed when the 10760 * background write is done. 
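 *
 * The check below treats both fragment runs as half-open intervals:
 * the journaled allocation covers [jstart, jend) and the freed run
 * covers [blkno, end).  For two such intervals the general overlap
 * predicate is the usual one, sketched here with hypothetical names:
 *
 *	static int
 *	overlaps(ufs2_daddr_t a0, ufs2_daddr_t a1,	// [a0, a1)
 *	    ufs2_daddr_t b0, ufs2_daddr_t b1)		// [b0, b1)
 *	{
 *		return (a0 < b1 && b0 < a1);
 *	}
 *
 * The assertion only tests whether either endpoint of the freed run
 * falls inside the journaled run, which is the case it cares about.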
10761 */ 10762 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == 10763 (COMPLETE | DEPCOMPLETE)) 10764 continue; 10765 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; 10766 jend = jnewblk->jn_blkno + jnewblk->jn_frags; 10767 if ((blkno >= jstart && blkno < jend) || 10768 (end > jstart && end <= jend)) { 10769 printf("state 0x%X %jd - %d %d dep %p\n", 10770 jnewblk->jn_state, jnewblk->jn_blkno, 10771 jnewblk->jn_oldfrags, jnewblk->jn_frags, 10772 jnewblk->jn_dep); 10773 panic("softdep_setup_blkfree: " 10774 "%jd-%jd(%d) overlaps with %jd-%jd", 10775 blkno, end, frags, jstart, jend); 10776 } 10777 } 10778#endif 10779 FREE_LOCK(ump); 10780} 10781 10782/* 10783 * Revert a block allocation when the journal record that describes it 10784 * is not yet written. 10785 */ 10786static int 10787jnewblk_rollback(jnewblk, fs, cgp, blksfree) 10788 struct jnewblk *jnewblk; 10789 struct fs *fs; 10790 struct cg *cgp; 10791 uint8_t *blksfree; 10792{ 10793 ufs1_daddr_t fragno; 10794 long cgbno, bbase; 10795 int frags, blk; 10796 int i; 10797 10798 frags = 0; 10799 cgbno = dtogd(fs, jnewblk->jn_blkno); 10800 /* 10801 * We have to test which frags need to be rolled back. We may 10802 * be operating on a stale copy when doing background writes. 10803 */ 10804 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) 10805 if (isclr(blksfree, cgbno + i)) 10806 frags++; 10807 if (frags == 0) 10808 return (0); 10809 /* 10810 * This is mostly ffs_blkfree() sans some validation and 10811 * superblock updates. 10812 */ 10813 if (frags == fs->fs_frag) { 10814 fragno = fragstoblks(fs, cgbno); 10815 ffs_setblock(fs, blksfree, fragno); 10816 ffs_clusteracct(fs, cgp, fragno, 1); 10817 cgp->cg_cs.cs_nbfree++; 10818 } else { 10819 cgbno += jnewblk->jn_oldfrags; 10820 bbase = cgbno - fragnum(fs, cgbno); 10821 /* Decrement the old frags. */ 10822 blk = blkmap(fs, blksfree, bbase); 10823 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 10824 /* Deallocate the fragment */ 10825 for (i = 0; i < frags; i++) 10826 setbit(blksfree, cgbno + i); 10827 cgp->cg_cs.cs_nffree += frags; 10828 /* Add back in counts associated with the new frags */ 10829 blk = blkmap(fs, blksfree, bbase); 10830 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 10831 /* If a complete block has been reassembled, account for it. */ 10832 fragno = fragstoblks(fs, bbase); 10833 if (ffs_isblock(fs, blksfree, fragno)) { 10834 cgp->cg_cs.cs_nffree -= fs->fs_frag; 10835 ffs_clusteracct(fs, cgp, fragno, 1); 10836 cgp->cg_cs.cs_nbfree++; 10837 } 10838 } 10839 stat_jnewblk++; 10840 jnewblk->jn_state &= ~ATTACHED; 10841 jnewblk->jn_state |= UNDONE; 10842 10843 return (frags); 10844} 10845 10846static void 10847initiate_write_bmsafemap(bmsafemap, bp) 10848 struct bmsafemap *bmsafemap; 10849 struct buf *bp; /* The cg block. */ 10850{ 10851 struct jaddref *jaddref; 10852 struct jnewblk *jnewblk; 10853 uint8_t *inosused; 10854 uint8_t *blksfree; 10855 struct cg *cgp; 10856 struct fs *fs; 10857 ino_t ino; 10858 10859 if (bmsafemap->sm_state & IOSTARTED) 10860 return; 10861 bmsafemap->sm_state |= IOSTARTED; 10862 /* 10863 * Clear any inode allocations which are pending journal writes. 
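 *
 * Both this inode rollback and the block rollback above poke the cg
 * bitmaps through the classic setbit()/clrbit()/isset() macros from
 * <sys/param.h>, where bit n of a byte array lives in bit (n % NBBY)
 * of byte (n / NBBY).  A tiny self-contained sketch (hypothetical,
 * not kernel code):
 *
 *	#include <sys/param.h>	// setbit(), clrbit(), isset(), NBBY
 *	#include <stdint.h>
 *
 *	uint8_t map[8];			// tracks 64 fragments
 *
 *	setbit(map, 10);		// fragment 10 is now free
 *	if (isset(map, 10))
 *		clrbit(map, 10);	// allocate it again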
10864 */ 10865 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { 10866 cgp = (struct cg *)bp->b_data; 10867 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10868 inosused = cg_inosused(cgp); 10869 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { 10870 ino = jaddref->ja_ino % fs->fs_ipg; 10871 if (isset(inosused, ino)) { 10872 if ((jaddref->ja_mode & IFMT) == IFDIR) 10873 cgp->cg_cs.cs_ndir--; 10874 cgp->cg_cs.cs_nifree++; 10875 clrbit(inosused, ino); 10876 jaddref->ja_state &= ~ATTACHED; 10877 jaddref->ja_state |= UNDONE; 10878 stat_jaddref++; 10879 } else 10880 panic("initiate_write_bmsafemap: inode %ju " 10881 "marked free", (uintmax_t)jaddref->ja_ino); 10882 } 10883 } 10884 /* 10885 * Clear any block allocations which are pending journal writes. 10886 */ 10887 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 10888 cgp = (struct cg *)bp->b_data; 10889 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10890 blksfree = cg_blksfree(cgp); 10891 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10892 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) 10893 continue; 10894 panic("initiate_write_bmsafemap: block %jd " 10895 "marked free", jnewblk->jn_blkno); 10896 } 10897 } 10898 /* 10899 * Move allocation lists to the written lists so they can be 10900 * cleared once the block write is complete. 10901 */ 10902 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, 10903 inodedep, id_deps); 10904 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, 10905 newblk, nb_deps); 10906 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, 10907 wk_list); 10908} 10909 10910/* 10911 * This routine is called during the completion interrupt 10912 * service routine for a disk write (from the procedure called 10913 * by the device driver to inform the filesystem caches of 10914 * a request completion). It should be called early in this 10915 * procedure, before the block is made available to other 10916 * processes or other routines are called. 10917 * 10918 */ 10919static void 10920softdep_disk_write_complete(bp) 10921 struct buf *bp; /* describes the completed disk write */ 10922{ 10923 struct worklist *wk; 10924 struct worklist *owk; 10925 struct ufsmount *ump; 10926 struct workhead reattach; 10927 struct freeblks *freeblks; 10928 struct buf *sbp; 10929 10930 /* 10931 * If an error occurred while doing the write, then the data 10932 * has not hit the disk and the dependencies cannot be unrolled. 10933 */ 10934 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) 10935 return; 10936 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 10937 return; 10938 ump = VFSTOUFS(wk->wk_mp); 10939 LIST_INIT(&reattach); 10940 /* 10941 * This lock must not be released anywhere in this code segment. 
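 *
 * The loop below drains bp->b_dep one element at a time and parks
 * anything that must be redone on a local 'reattach' list; holding
 * the per-mount lock across the whole drain keeps other threads from
 * seeing the dependency list half-emptied.  Schematically (with a
 * hypothetical handler), the idiom is:
 *
 *	LIST_INIT(&redo);
 *	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
 *		WORKLIST_REMOVE(wk);
 *		if (handler(wk))	// nonzero: not finished yet
 *			WORKLIST_INSERT(&redo, wk);
 *	}
 *	while ((wk = LIST_FIRST(&redo)) != NULL) {
 *		WORKLIST_REMOVE(wk);
 *		WORKLIST_INSERT(&bp->b_dep, wk);
 *	}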
 */
	sbp = NULL;
	owk = NULL;
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		WORKLIST_REMOVE(wk);
		atomic_add_long(&dep_write[wk->wk_type], 1);
		if (wk == owk)
			panic("duplicate worklist: %p\n", wk);
		owk = wk;
		switch (wk->wk_type) {

		case D_PAGEDEP:
			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_INODEDEP:
			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_BMSAFEMAP:
			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_MKDIR:
			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
			continue;

		case D_ALLOCDIRECT:
			wk->wk_state |= COMPLETE;
			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
			continue;

		case D_ALLOCINDIR:
			wk->wk_state |= COMPLETE;
			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
			continue;

		case D_INDIRDEP:
			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEBLKS:
			wk->wk_state |= COMPLETE;
			freeblks = WK_FREEBLKS(wk);
			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
			    LIST_EMPTY(&freeblks->fb_jblkdephd))
				add_to_worklist(wk, WK_NODELAY);
			continue;

		case D_FREEWORK:
			handle_written_freework(WK_FREEWORK(wk));
			break;

		case D_JSEGDEP:
			free_jsegdep(WK_JSEGDEP(wk));
			continue;

		case D_JSEG:
			handle_written_jseg(WK_JSEG(wk), bp);
			continue;

		case D_SBDEP:
			if (handle_written_sbdep(WK_SBDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEDEP:
			free_freedep(WK_FREEDEP(wk));
			continue;

		default:
			panic("softdep_disk_write_complete: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	/*
	 * Reattach any requests that must be redone.
	 */
	while ((wk = LIST_FIRST(&reattach)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&bp->b_dep, wk);
	}
	FREE_LOCK(ump);
	if (sbp)
		brelse(sbp);
}

/*
 * Called from within softdep_disk_write_complete above. Note that
 * this routine is always called from interrupt level with further
 * splbio interrupts blocked.
 */
static void
handle_allocdirect_partdone(adp, wkhd)
	struct allocdirect *adp;	/* the completed allocdirect */
	struct workhead *wkhd;		/* Work to do when inode is written. */
{
	struct allocdirectlst *listhead;
	struct allocdirect *listadp;
	struct inodedep *inodedep;
	long bsize;

	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written. Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem. Thus, we cannot free any
	 * allocdirects after one whose ad_oldblkno claims a fragment as
	 * these blocks must be rolled back to zero before writing the inode.
	 * We check the currently active set of allocdirects in id_inoupdt
	 * or id_extupdt as appropriate.
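 *
 * Concretely: if the update list holds allocdirects for logical
 * blocks 0, 1 and 2, blocks 0 and 2 are fully complete, but block 1
 * rolled back an old fragment (its ad_oldsize is neither 0 nor a
 * full block), then only block 0 may be processed now; block 2 must
 * wait until the fragment at block 1 has been resolved.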
11061 */ 11062 inodedep = adp->ad_inodedep; 11063 bsize = inodedep->id_fs->fs_bsize; 11064 if (adp->ad_state & EXTDATA) 11065 listhead = &inodedep->id_extupdt; 11066 else 11067 listhead = &inodedep->id_inoupdt; 11068 TAILQ_FOREACH(listadp, listhead, ad_next) { 11069 /* found our block */ 11070 if (listadp == adp) 11071 break; 11072 /* continue if ad_oldlbn is not a fragment */ 11073 if (listadp->ad_oldsize == 0 || 11074 listadp->ad_oldsize == bsize) 11075 continue; 11076 /* hit a fragment */ 11077 return; 11078 } 11079 /* 11080 * If we have reached the end of the current list without 11081 * finding the just finished dependency, then it must be 11082 * on the future dependency list. Future dependencies cannot 11083 * be freed until they are moved to the current list. 11084 */ 11085 if (listadp == NULL) { 11086#ifdef DEBUG 11087 if (adp->ad_state & EXTDATA) 11088 listhead = &inodedep->id_newextupdt; 11089 else 11090 listhead = &inodedep->id_newinoupdt; 11091 TAILQ_FOREACH(listadp, listhead, ad_next) 11092 /* found our block */ 11093 if (listadp == adp) 11094 break; 11095 if (listadp == NULL) 11096 panic("handle_allocdirect_partdone: lost dep"); 11097#endif /* DEBUG */ 11098 return; 11099 } 11100 /* 11101 * If we have found the just finished dependency, then queue 11102 * it along with anything that follows it that is complete. 11103 * Since the pointer has not yet been written in the inode 11104 * as the dependency prevents it, place the allocdirect on the 11105 * bufwait list where it will be freed once the pointer is 11106 * valid. 11107 */ 11108 if (wkhd == NULL) 11109 wkhd = &inodedep->id_bufwait; 11110 for (; adp; adp = listadp) { 11111 listadp = TAILQ_NEXT(adp, ad_next); 11112 if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE) 11113 return; 11114 TAILQ_REMOVE(listhead, adp, ad_next); 11115 WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list); 11116 } 11117} 11118 11119/* 11120 * Called from within softdep_disk_write_complete above. This routine 11121 * completes successfully written allocindirs. 11122 */ 11123static void 11124handle_allocindir_partdone(aip) 11125 struct allocindir *aip; /* the completed allocindir */ 11126{ 11127 struct indirdep *indirdep; 11128 11129 if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE) 11130 return; 11131 indirdep = aip->ai_indirdep; 11132 LIST_REMOVE(aip, ai_next); 11133 /* 11134 * Don't set a pointer while the buffer is undergoing IO or while 11135 * we have active truncations. 11136 */ 11137 if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) { 11138 LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next); 11139 return; 11140 } 11141 if (indirdep->ir_state & UFS1FMT) 11142 ((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 11143 aip->ai_newblkno; 11144 else 11145 ((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] = 11146 aip->ai_newblkno; 11147 /* 11148 * Await the pointer write before freeing the allocindir. 11149 */ 11150 LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next); 11151} 11152 11153/* 11154 * Release segments held on a jwork list. 
11155 */ 11156static void 11157handle_jwork(wkhd) 11158 struct workhead *wkhd; 11159{ 11160 struct worklist *wk; 11161 11162 while ((wk = LIST_FIRST(wkhd)) != NULL) { 11163 WORKLIST_REMOVE(wk); 11164 switch (wk->wk_type) { 11165 case D_JSEGDEP: 11166 free_jsegdep(WK_JSEGDEP(wk)); 11167 continue; 11168 case D_FREEDEP: 11169 free_freedep(WK_FREEDEP(wk)); 11170 continue; 11171 case D_FREEFRAG: 11172 rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep)); 11173 WORKITEM_FREE(wk, D_FREEFRAG); 11174 continue; 11175 case D_FREEWORK: 11176 handle_written_freework(WK_FREEWORK(wk)); 11177 continue; 11178 default: 11179 panic("handle_jwork: Unknown type %s\n", 11180 TYPENAME(wk->wk_type)); 11181 } 11182 } 11183} 11184 11185/* 11186 * Handle the bufwait list on an inode when it is safe to release items 11187 * held there. This normally happens after an inode block is written but 11188 * may be delayed and handled later if there are pending journal items that 11189 * are not yet safe to be released. 11190 */ 11191static struct freefile * 11192handle_bufwait(inodedep, refhd) 11193 struct inodedep *inodedep; 11194 struct workhead *refhd; 11195{ 11196 struct jaddref *jaddref; 11197 struct freefile *freefile; 11198 struct worklist *wk; 11199 11200 freefile = NULL; 11201 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 11202 WORKLIST_REMOVE(wk); 11203 switch (wk->wk_type) { 11204 case D_FREEFILE: 11205 /* 11206 * We defer adding freefile to the worklist 11207 * until all other additions have been made to 11208 * ensure that it will be done after all the 11209 * old blocks have been freed. 11210 */ 11211 if (freefile != NULL) 11212 panic("handle_bufwait: freefile"); 11213 freefile = WK_FREEFILE(wk); 11214 continue; 11215 11216 case D_MKDIR: 11217 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 11218 continue; 11219 11220 case D_DIRADD: 11221 diradd_inode_written(WK_DIRADD(wk), inodedep); 11222 continue; 11223 11224 case D_FREEFRAG: 11225 wk->wk_state |= COMPLETE; 11226 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE) 11227 add_to_worklist(wk, 0); 11228 continue; 11229 11230 case D_DIRREM: 11231 wk->wk_state |= COMPLETE; 11232 add_to_worklist(wk, 0); 11233 continue; 11234 11235 case D_ALLOCDIRECT: 11236 case D_ALLOCINDIR: 11237 free_newblk(WK_NEWBLK(wk)); 11238 continue; 11239 11240 case D_JNEWBLK: 11241 wk->wk_state |= COMPLETE; 11242 free_jnewblk(WK_JNEWBLK(wk)); 11243 continue; 11244 11245 /* 11246 * Save freed journal segments and add references on 11247 * the supplied list which will delay their release 11248 * until the cg bitmap is cleared on disk. 11249 */ 11250 case D_JSEGDEP: 11251 if (refhd == NULL) 11252 free_jsegdep(WK_JSEGDEP(wk)); 11253 else 11254 WORKLIST_INSERT(refhd, wk); 11255 continue; 11256 11257 case D_JADDREF: 11258 jaddref = WK_JADDREF(wk); 11259 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 11260 if_deps); 11261 /* 11262 * Transfer any jaddrefs to the list to be freed with 11263 * the bitmap if we're handling a removed file. 11264 */ 11265 if (refhd == NULL) { 11266 wk->wk_state |= COMPLETE; 11267 free_jaddref(jaddref); 11268 } else 11269 WORKLIST_INSERT(refhd, wk); 11270 continue; 11271 11272 default: 11273 panic("handle_bufwait: Unknown type %p(%s)", 11274 wk, TYPENAME(wk->wk_type)); 11275 /* NOTREACHED */ 11276 } 11277 } 11278 return (freefile); 11279} 11280/* 11281 * Called from within softdep_disk_write_complete above to restore 11282 * in-memory inode block contents to their most up-to-date state. 
Note 11283 * that this routine is always called from interrupt level with further 11284 * splbio interrupts blocked. 11285 */ 11286static int 11287handle_written_inodeblock(inodedep, bp) 11288 struct inodedep *inodedep; 11289 struct buf *bp; /* buffer containing the inode block */ 11290{ 11291 struct freefile *freefile; 11292 struct allocdirect *adp, *nextadp; 11293 struct ufs1_dinode *dp1 = NULL; 11294 struct ufs2_dinode *dp2 = NULL; 11295 struct workhead wkhd; 11296 int hadchanges, fstype; 11297 ino_t freelink; 11298 11299 LIST_INIT(&wkhd); 11300 hadchanges = 0; 11301 freefile = NULL; 11302 if ((inodedep->id_state & IOSTARTED) == 0) 11303 panic("handle_written_inodeblock: not started"); 11304 inodedep->id_state &= ~IOSTARTED; 11305 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 11306 fstype = UFS1; 11307 dp1 = (struct ufs1_dinode *)bp->b_data + 11308 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11309 freelink = dp1->di_freelink; 11310 } else { 11311 fstype = UFS2; 11312 dp2 = (struct ufs2_dinode *)bp->b_data + 11313 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11314 freelink = dp2->di_freelink; 11315 } 11316 /* 11317 * Leave this inodeblock dirty until it's in the list. 11318 */ 11319 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) { 11320 struct inodedep *inon; 11321 11322 inon = TAILQ_NEXT(inodedep, id_unlinked); 11323 if ((inon == NULL && freelink == 0) || 11324 (inon && inon->id_ino == freelink)) { 11325 if (inon) 11326 inon->id_state |= UNLINKPREV; 11327 inodedep->id_state |= UNLINKNEXT; 11328 } 11329 hadchanges = 1; 11330 } 11331 /* 11332 * If we had to rollback the inode allocation because of 11333 * bitmaps being incomplete, then simply restore it. 11334 * Keep the block dirty so that it will not be reclaimed until 11335 * all associated dependencies have been cleared and the 11336 * corresponding updates written to disk. 11337 */ 11338 if (inodedep->id_savedino1 != NULL) { 11339 hadchanges = 1; 11340 if (fstype == UFS1) 11341 *dp1 = *inodedep->id_savedino1; 11342 else 11343 *dp2 = *inodedep->id_savedino2; 11344 free(inodedep->id_savedino1, M_SAVEDINO); 11345 inodedep->id_savedino1 = NULL; 11346 if ((bp->b_flags & B_DELWRI) == 0) 11347 stat_inode_bitmap++; 11348 bdirty(bp); 11349 /* 11350 * If the inode is clear here and GOINGAWAY it will never 11351 * be written. Process the bufwait and clear any pending 11352 * work which may include the freefile. 11353 */ 11354 if (inodedep->id_state & GOINGAWAY) 11355 goto bufwait; 11356 return (1); 11357 } 11358 inodedep->id_state |= COMPLETE; 11359 /* 11360 * Roll forward anything that had to be rolled back before 11361 * the inode could be updated. 
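 *
 * Note that dp1/dp2 above were located with plain pointer
 * arithmetic: an inode block holds INOPB(fs) on-disk inodes and
 * ino_to_fsbo() is simply the slot index within that block, e.g.
 *
 *	dp2 = (struct ufs2_dinode *)bp->b_data +
 *	    ino_to_fsbo(fs, ino);	// slot ino % INOPB(fs)
 *
 * (fs/ino shorthand for the inodedep's filesystem and inode number).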
11362 */ 11363 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 11364 nextadp = TAILQ_NEXT(adp, ad_next); 11365 if (adp->ad_state & ATTACHED) 11366 panic("handle_written_inodeblock: new entry"); 11367 if (fstype == UFS1) { 11368 if (adp->ad_offset < NDADDR) { 11369 if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11370 panic("%s %s #%jd mismatch %d != %jd", 11371 "handle_written_inodeblock:", 11372 "direct pointer", 11373 (intmax_t)adp->ad_offset, 11374 dp1->di_db[adp->ad_offset], 11375 (intmax_t)adp->ad_oldblkno); 11376 dp1->di_db[adp->ad_offset] = adp->ad_newblkno; 11377 } else { 11378 if (dp1->di_ib[adp->ad_offset - NDADDR] != 0) 11379 panic("%s: %s #%jd allocated as %d", 11380 "handle_written_inodeblock", 11381 "indirect pointer", 11382 (intmax_t)adp->ad_offset - NDADDR, 11383 dp1->di_ib[adp->ad_offset - NDADDR]); 11384 dp1->di_ib[adp->ad_offset - NDADDR] = 11385 adp->ad_newblkno; 11386 } 11387 } else { 11388 if (adp->ad_offset < NDADDR) { 11389 if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11390 panic("%s: %s #%jd %s %jd != %jd", 11391 "handle_written_inodeblock", 11392 "direct pointer", 11393 (intmax_t)adp->ad_offset, "mismatch", 11394 (intmax_t)dp2->di_db[adp->ad_offset], 11395 (intmax_t)adp->ad_oldblkno); 11396 dp2->di_db[adp->ad_offset] = adp->ad_newblkno; 11397 } else { 11398 if (dp2->di_ib[adp->ad_offset - NDADDR] != 0) 11399 panic("%s: %s #%jd allocated as %jd", 11400 "handle_written_inodeblock", 11401 "indirect pointer", 11402 (intmax_t)adp->ad_offset - NDADDR, 11403 (intmax_t) 11404 dp2->di_ib[adp->ad_offset - NDADDR]); 11405 dp2->di_ib[adp->ad_offset - NDADDR] = 11406 adp->ad_newblkno; 11407 } 11408 } 11409 adp->ad_state &= ~UNDONE; 11410 adp->ad_state |= ATTACHED; 11411 hadchanges = 1; 11412 } 11413 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 11414 nextadp = TAILQ_NEXT(adp, ad_next); 11415 if (adp->ad_state & ATTACHED) 11416 panic("handle_written_inodeblock: new entry"); 11417 if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno) 11418 panic("%s: direct pointers #%jd %s %jd != %jd", 11419 "handle_written_inodeblock", 11420 (intmax_t)adp->ad_offset, "mismatch", 11421 (intmax_t)dp2->di_extb[adp->ad_offset], 11422 (intmax_t)adp->ad_oldblkno); 11423 dp2->di_extb[adp->ad_offset] = adp->ad_newblkno; 11424 adp->ad_state &= ~UNDONE; 11425 adp->ad_state |= ATTACHED; 11426 hadchanges = 1; 11427 } 11428 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 11429 stat_direct_blk_ptrs++; 11430 /* 11431 * Reset the file size to its most up-to-date value. 
 */
	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
		panic("handle_written_inodeblock: bad size");
	if (inodedep->id_savednlink > LINK_MAX)
		panic("handle_written_inodeblock: Invalid link count "
		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
	if (fstype == UFS1) {
		if (dp1->di_nlink != inodedep->id_savednlink) {
			dp1->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp1->di_size != inodedep->id_savedsize) {
			dp1->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
	} else {
		if (dp2->di_nlink != inodedep->id_savednlink) {
			dp2->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp2->di_size != inodedep->id_savedsize) {
			dp2->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
		if (dp2->di_extsize != inodedep->id_savedextsize) {
			dp2->di_extsize = inodedep->id_savedextsize;
			hadchanges = 1;
		}
	}
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_savednlink = -1;
	/*
	 * If there were any rollbacks in the inode block, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (hadchanges)
		bdirty(bp);
bufwait:
	/*
	 * Process any allocdirects that completed during the update.
	 */
	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	/*
	 * Process deallocations that were held pending until the
	 * inode had been written to disk. Freeing of the inode
	 * is delayed until after all blocks have been freed to
	 * avoid creation of new <vfsid, inum, lbn> triples
	 * before the old ones have been deleted.  Completely
	 * unlinked inodes are not processed until the unlinked
	 * inode list is written or the last reference is removed.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
		freefile = handle_bufwait(inodedep, NULL);
		if (freefile && !LIST_EMPTY(&wkhd)) {
			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
			freefile = NULL;
		}
	}
	/*
	 * Move rolled forward dependency completions to the bufwait list
	 * now that those that were already written have been processed.
	 */
	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
		panic("handle_written_inodeblock: bufwait but no changes");
	jwork_move(&inodedep->id_bufwait, &wkhd);

	if (freefile != NULL) {
		/*
		 * If the inode is goingaway it was never written.  Fake up
		 * the state here so free_inodedep() can succeed.
		 */
		if (inodedep->id_state & GOINGAWAY)
			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
		if (free_inodedep(inodedep) == 0)
			panic("handle_written_inodeblock: live inodedep %p",
			    inodedep);
		add_to_worklist(&freefile->fx_list, 0);
		return (0);
	}

	/*
	 * If no outstanding dependencies, free it.
11519 */ 11520 if (free_inodedep(inodedep) || 11521 (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 && 11522 TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 11523 TAILQ_FIRST(&inodedep->id_extupdt) == 0 && 11524 LIST_FIRST(&inodedep->id_bufwait) == 0)) 11525 return (0); 11526 return (hadchanges); 11527} 11528 11529static int 11530handle_written_indirdep(indirdep, bp, bpp) 11531 struct indirdep *indirdep; 11532 struct buf *bp; 11533 struct buf **bpp; 11534{ 11535 struct allocindir *aip; 11536 struct buf *sbp; 11537 int chgs; 11538 11539 if (indirdep->ir_state & GOINGAWAY) 11540 panic("handle_written_indirdep: indirdep gone"); 11541 if ((indirdep->ir_state & IOSTARTED) == 0) 11542 panic("handle_written_indirdep: IO not started"); 11543 chgs = 0; 11544 /* 11545 * If there were rollbacks revert them here. 11546 */ 11547 if (indirdep->ir_saveddata) { 11548 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 11549 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11550 free(indirdep->ir_saveddata, M_INDIRDEP); 11551 indirdep->ir_saveddata = NULL; 11552 } 11553 chgs = 1; 11554 } 11555 indirdep->ir_state &= ~(UNDONE | IOSTARTED); 11556 indirdep->ir_state |= ATTACHED; 11557 /* 11558 * Move allocindirs with written pointers to the completehd if 11559 * the indirdep's pointer is not yet written. Otherwise 11560 * free them here. 11561 */ 11562 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) { 11563 LIST_REMOVE(aip, ai_next); 11564 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 11565 LIST_INSERT_HEAD(&indirdep->ir_completehd, aip, 11566 ai_next); 11567 newblk_freefrag(&aip->ai_block); 11568 continue; 11569 } 11570 free_newblk(&aip->ai_block); 11571 } 11572 /* 11573 * Move allocindirs that have finished dependency processing from 11574 * the done list to the write list after updating the pointers. 11575 */ 11576 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11577 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 11578 handle_allocindir_partdone(aip); 11579 if (aip == LIST_FIRST(&indirdep->ir_donehd)) 11580 panic("disk_write_complete: not gone"); 11581 chgs = 1; 11582 } 11583 } 11584 /* 11585 * Preserve the indirdep if there were any changes or if it is not 11586 * yet valid on disk. 11587 */ 11588 if (chgs) { 11589 stat_indir_blk_ptrs++; 11590 bdirty(bp); 11591 return (1); 11592 } 11593 /* 11594 * If there were no changes we can discard the savedbp and detach 11595 * ourselves from the buf. We are only carrying completed pointers 11596 * in this case. 11597 */ 11598 sbp = indirdep->ir_savebp; 11599 sbp->b_flags |= B_INVAL | B_NOCACHE; 11600 indirdep->ir_savebp = NULL; 11601 indirdep->ir_bp = NULL; 11602 if (*bpp != NULL) 11603 panic("handle_written_indirdep: bp already exists."); 11604 *bpp = sbp; 11605 /* 11606 * The indirdep may not be freed until its parent points at it. 11607 */ 11608 if (indirdep->ir_state & DEPCOMPLETE) 11609 free_indirdep(indirdep); 11610 11611 return (0); 11612} 11613 11614/* 11615 * Process a diradd entry after its dependent inode has been written. 11616 * This routine must be called with splbio interrupts blocked. 11617 */ 11618static void 11619diradd_inode_written(dap, inodedep) 11620 struct diradd *dap; 11621 struct inodedep *inodedep; 11622{ 11623 11624 dap->da_state |= COMPLETE; 11625 complete_diradd(dap); 11626 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 11627} 11628 11629/* 11630 * Returns true if the bmsafemap will have rollbacks when written. Must only 11631 * be called with the per-filesystem lock and the buf lock on the cg held. 
 */
static int
bmsafemap_backgroundwrite(bmsafemap, bp)
	struct bmsafemap *bmsafemap;
	struct buf *bp;
{
	int dirty;

	LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
	/*
	 * If we're initiating a background write we need to process the
	 * rollbacks as they exist now, not as they exist when IO starts.
	 * No other consumers will look at the contents of the shadowed
	 * buf so this is safe to do here.
	 */
	if (bp->b_xflags & BX_BKGRDMARKER)
		initiate_write_bmsafemap(bmsafemap, bp);

	return (dirty);
}

/*
 * Re-apply an allocation when a cg write is complete.
 */
static int
jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
	struct jnewblk *jnewblk;
	struct fs *fs;
	struct cg *cgp;
	uint8_t *blksfree;
{
	ufs1_daddr_t fragno;
	ufs2_daddr_t blkno;
	long cgbno, bbase;
	int frags, blk;
	int i;

	frags = 0;
	cgbno = dtogd(fs, jnewblk->jn_blkno);
	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
		if (isclr(blksfree, cgbno + i))
			panic("jnewblk_rollforward: re-allocated fragment");
		frags++;
	}
	if (frags == fs->fs_frag) {
		blkno = fragstoblks(fs, cgbno);
		ffs_clrblock(fs, blksfree, (long)blkno);
		ffs_clusteracct(fs, cgp, blkno, -1);
		cgp->cg_cs.cs_nbfree--;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		cgbno += jnewblk->jn_oldfrags;
		/* If a complete block had been reassembled, account for it. */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree += fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, -1);
			cgp->cg_cs.cs_nbfree--;
		}
		/* Decrement the old frags. */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/* Allocate the fragment */
		for (i = 0; i < frags; i++)
			clrbit(blksfree, cgbno + i);
		cgp->cg_cs.cs_nffree -= frags;
		/* Add back in counts associated with the new frags */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
	}
	return (frags);
}

/*
 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
 * changes if it's not a background write.  Set all written dependencies
 * to DEPCOMPLETE and free the structure if possible.
 */
static int
handle_written_bmsafemap(bmsafemap, bp)
	struct bmsafemap *bmsafemap;
	struct buf *bp;
{
	struct newblk *newblk;
	struct inodedep *inodedep;
	struct jaddref *jaddref, *jatmp;
	struct jnewblk *jnewblk, *jntmp;
	struct ufsmount *ump;
	uint8_t *inosused;
	uint8_t *blksfree;
	struct cg *cgp;
	struct fs *fs;
	ino_t ino;
	int foreground;
	int chgs;

	if ((bmsafemap->sm_state & IOSTARTED) == 0)
		panic("handle_written_bmsafemap: Not started\n");
	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
	chgs = 0;
	bmsafemap->sm_state &= ~IOSTARTED;
	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
	/*
	 * Release journal work that was waiting on the write.
	 */
	handle_jwork(&bmsafemap->sm_freewr);

	/*
	 * Restore unwritten inode allocation pending jaddref writes.
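 *
 * The fragment accounting in jnewblk_rollforward() above follows the
 * usual ffs bracket: cg_frsum[n] counts the free runs of length n
 * fragments, so any change to a block's free bits first retires the
 * old run shape and then credits the new one:
 *
 *	blk = blkmap(fs, blksfree, bbase);
 *	ffs_fragacct(fs, blk, cgp->cg_frsum, -1);	// forget old runs
 *	// ... set or clear the fragment bits ...
 *	blk = blkmap(fs, blksfree, bbase);
 *	ffs_fragacct(fs, blk, cgp->cg_frsum, 1);	// count new runs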
11743 */ 11744 if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) { 11745 cgp = (struct cg *)bp->b_data; 11746 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11747 inosused = cg_inosused(cgp); 11748 LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd, 11749 ja_bmdeps, jatmp) { 11750 if ((jaddref->ja_state & UNDONE) == 0) 11751 continue; 11752 ino = jaddref->ja_ino % fs->fs_ipg; 11753 if (isset(inosused, ino)) 11754 panic("handle_written_bmsafemap: " 11755 "re-allocated inode"); 11756 /* Do the roll-forward only if it's a real copy. */ 11757 if (foreground) { 11758 if ((jaddref->ja_mode & IFMT) == IFDIR) 11759 cgp->cg_cs.cs_ndir++; 11760 cgp->cg_cs.cs_nifree--; 11761 setbit(inosused, ino); 11762 chgs = 1; 11763 } 11764 jaddref->ja_state &= ~UNDONE; 11765 jaddref->ja_state |= ATTACHED; 11766 free_jaddref(jaddref); 11767 } 11768 } 11769 /* 11770 * Restore any block allocations which are pending journal writes. 11771 */ 11772 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 11773 cgp = (struct cg *)bp->b_data; 11774 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11775 blksfree = cg_blksfree(cgp); 11776 LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps, 11777 jntmp) { 11778 if ((jnewblk->jn_state & UNDONE) == 0) 11779 continue; 11780 /* Do the roll-forward only if it's a real copy. */ 11781 if (foreground && 11782 jnewblk_rollforward(jnewblk, fs, cgp, blksfree)) 11783 chgs = 1; 11784 jnewblk->jn_state &= ~(UNDONE | NEWBLOCK); 11785 jnewblk->jn_state |= ATTACHED; 11786 free_jnewblk(jnewblk); 11787 } 11788 } 11789 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) { 11790 newblk->nb_state |= DEPCOMPLETE; 11791 newblk->nb_state &= ~ONDEPLIST; 11792 newblk->nb_bmsafemap = NULL; 11793 LIST_REMOVE(newblk, nb_deps); 11794 if (newblk->nb_list.wk_type == D_ALLOCDIRECT) 11795 handle_allocdirect_partdone( 11796 WK_ALLOCDIRECT(&newblk->nb_list), NULL); 11797 else if (newblk->nb_list.wk_type == D_ALLOCINDIR) 11798 handle_allocindir_partdone( 11799 WK_ALLOCINDIR(&newblk->nb_list)); 11800 else if (newblk->nb_list.wk_type != D_NEWBLK) 11801 panic("handle_written_bmsafemap: Unexpected type: %s", 11802 TYPENAME(newblk->nb_list.wk_type)); 11803 } 11804 while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) { 11805 inodedep->id_state |= DEPCOMPLETE; 11806 inodedep->id_state &= ~ONDEPLIST; 11807 LIST_REMOVE(inodedep, id_deps); 11808 inodedep->id_bmsafemap = NULL; 11809 } 11810 LIST_REMOVE(bmsafemap, sm_next); 11811 if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) && 11812 LIST_EMPTY(&bmsafemap->sm_jnewblkhd) && 11813 LIST_EMPTY(&bmsafemap->sm_newblkhd) && 11814 LIST_EMPTY(&bmsafemap->sm_inodedephd) && 11815 LIST_EMPTY(&bmsafemap->sm_freehd)) { 11816 LIST_REMOVE(bmsafemap, sm_hash); 11817 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 11818 return (0); 11819 } 11820 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 11821 if (foreground) 11822 bdirty(bp); 11823 return (1); 11824} 11825 11826/* 11827 * Try to free a mkdir dependency. 
11828 */ 11829static void 11830complete_mkdir(mkdir) 11831 struct mkdir *mkdir; 11832{ 11833 struct diradd *dap; 11834 11835 if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE) 11836 return; 11837 LIST_REMOVE(mkdir, md_mkdirs); 11838 dap = mkdir->md_diradd; 11839 dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 11840 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) { 11841 dap->da_state |= DEPCOMPLETE; 11842 complete_diradd(dap); 11843 } 11844 WORKITEM_FREE(mkdir, D_MKDIR); 11845} 11846 11847/* 11848 * Handle the completion of a mkdir dependency. 11849 */ 11850static void 11851handle_written_mkdir(mkdir, type) 11852 struct mkdir *mkdir; 11853 int type; 11854{ 11855 11856 if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type) 11857 panic("handle_written_mkdir: bad type"); 11858 mkdir->md_state |= COMPLETE; 11859 complete_mkdir(mkdir); 11860} 11861 11862static int 11863free_pagedep(pagedep) 11864 struct pagedep *pagedep; 11865{ 11866 int i; 11867 11868 if (pagedep->pd_state & NEWBLOCK) 11869 return (0); 11870 if (!LIST_EMPTY(&pagedep->pd_dirremhd)) 11871 return (0); 11872 for (i = 0; i < DAHASHSZ; i++) 11873 if (!LIST_EMPTY(&pagedep->pd_diraddhd[i])) 11874 return (0); 11875 if (!LIST_EMPTY(&pagedep->pd_pendinghd)) 11876 return (0); 11877 if (!LIST_EMPTY(&pagedep->pd_jmvrefhd)) 11878 return (0); 11879 if (pagedep->pd_state & ONWORKLIST) 11880 WORKLIST_REMOVE(&pagedep->pd_list); 11881 LIST_REMOVE(pagedep, pd_hash); 11882 WORKITEM_FREE(pagedep, D_PAGEDEP); 11883 11884 return (1); 11885} 11886 11887/* 11888 * Called from within softdep_disk_write_complete above. 11889 * A write operation was just completed. Removed inodes can 11890 * now be freed and associated block pointers may be committed. 11891 * Note that this routine is always called from interrupt level 11892 * with further splbio interrupts blocked. 11893 */ 11894static int 11895handle_written_filepage(pagedep, bp) 11896 struct pagedep *pagedep; 11897 struct buf *bp; /* buffer containing the written page */ 11898{ 11899 struct dirrem *dirrem; 11900 struct diradd *dap, *nextdap; 11901 struct direct *ep; 11902 int i, chgs; 11903 11904 if ((pagedep->pd_state & IOSTARTED) == 0) 11905 panic("handle_written_filepage: not started"); 11906 pagedep->pd_state &= ~IOSTARTED; 11907 /* 11908 * Process any directory removals that have been committed. 11909 */ 11910 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 11911 LIST_REMOVE(dirrem, dm_next); 11912 dirrem->dm_state |= COMPLETE; 11913 dirrem->dm_dirinum = pagedep->pd_ino; 11914 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 11915 ("handle_written_filepage: Journal entries not written.")); 11916 add_to_worklist(&dirrem->dm_list, 0); 11917 } 11918 /* 11919 * Free any directory additions that have been committed. 11920 * If it is a newly allocated block, we have to wait until 11921 * the on-disk directory inode claims the new block. 11922 */ 11923 if ((pagedep->pd_state & NEWBLOCK) == 0) 11924 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 11925 free_diradd(dap, NULL); 11926 /* 11927 * Uncommitted directory entries must be restored. 
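 *
 * "Restored" undoes the rollback made when this write was started:
 * an entry whose inode was not yet committed had its d_ino
 * temporarily cleared so that a crash would leave what looks like an
 * unallocated entry; now that the write has finished, the real inode
 * number is put back and the entry is marked ATTACHED again.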
 */
	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
		    dap = nextdap) {
			nextdap = LIST_NEXT(dap, da_pdlist);
			if (dap->da_state & ATTACHED)
				panic("handle_written_filepage: attached");
			ep = (struct direct *)
			    ((char *)bp->b_data + dap->da_offset);
			ep->d_ino = dap->da_newinum;
			dap->da_state &= ~UNDONE;
			dap->da_state |= ATTACHED;
			chgs = 1;
			/*
			 * If the inode referenced by the directory has
			 * been written out, then the dependency can be
			 * moved to the pending list.
			 */
			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
				LIST_REMOVE(dap, da_pdlist);
				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
				    da_pdlist);
			}
		}
	}
	/*
	 * If there were any rollbacks in the directory, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (chgs) {
		if ((bp->b_flags & B_DELWRI) == 0)
			stat_dir_entry++;
		bdirty(bp);
		return (1);
	}
	/*
	 * If we are not waiting for a new directory block to be
	 * claimed by its inode, then the pagedep will be freed.
	 * Otherwise it will remain to track any new entries on
	 * the page in case they are fsync'ed.
	 */
	free_pagedep(pagedep);
	return (0);
}

/*
 * Writing back in-core inode structures.
 *
 * The filesystem only accesses an inode's contents when it occupies an
 * "in-core" inode structure. These "in-core" structures are separate from
 * the page frames used to cache inode blocks. Only the latter are
 * transferred to/from the disk. So, when the updated contents of the
 * "in-core" inode structure are copied to the corresponding in-memory inode
 * block, the dependencies are also transferred. The following procedure is
 * called when copying a dirty "in-core" inode to a cached inode block.
 */

/*
 * Called when an inode is loaded from disk. If the effective link count
 * differed from the actual link count when it was last flushed, then we
 * need to ensure that the correct effective link count is put back.
 */
void
softdep_load_inodeblock(ip)
	struct inode *ip;	/* the "in_core" copy of the inode */
{
	struct inodedep *inodedep;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_load_inodeblock called on non-softdep filesystem"));
	/*
	 * Check for alternate nlink count.
	 */
	ip->i_effnlink = ip->i_nlink;
	ACQUIRE_LOCK(ip->i_ump);
	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
	    &inodedep) == 0) {
		FREE_LOCK(ip->i_ump);
		return;
	}
	ip->i_effnlink -= inodedep->id_nlinkdelta;
	FREE_LOCK(ip->i_ump);
}

/*
 * This routine is called just before the "in-core" inode
 * information is to be copied to the in-memory inode block.
 * Recall that an inode block contains several inodes. If
 * the force flag is set, then the dependencies will be
 * cleared so that the update can always be made. Note that
 * the buffer is locked when this routine is called, so we
 * will never be in the middle of writing the inode block
 * to disk.
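 *
 * For the link-count bookkeeping below, the invariant is
 *
 *	ip->i_nlink == ip->i_effnlink + inodedep->id_nlinkdelta
 *
 * where i_nlink is the count that is safe to put on disk now and
 * i_effnlink is the count once all queued operations complete.  For
 * example, an inode with two names whose unlink has been started but
 * whose journal entry is not yet safe has i_nlink 2, i_effnlink 1
 * and a delta of 1.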
12022 */ 12023void 12024softdep_update_inodeblock(ip, bp, waitfor) 12025 struct inode *ip; /* the "in_core" copy of the inode */ 12026 struct buf *bp; /* the buffer containing the inode block */ 12027 int waitfor; /* nonzero => update must be allowed */ 12028{ 12029 struct inodedep *inodedep; 12030 struct inoref *inoref; 12031 struct ufsmount *ump; 12032 struct worklist *wk; 12033 struct mount *mp; 12034 struct buf *ibp; 12035 struct fs *fs; 12036 int error; 12037 12038 ump = ip->i_ump; 12039 mp = UFSTOVFS(ump); 12040 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 12041 ("softdep_update_inodeblock called on non-softdep filesystem")); 12042 fs = ip->i_fs; 12043 /* 12044 * Preserve the freelink that is on disk. clear_unlinked_inodedep() 12045 * does not have access to the in-core ip so must write directly into 12046 * the inode block buffer when setting freelink. 12047 */ 12048 if (fs->fs_magic == FS_UFS1_MAGIC) 12049 DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data + 12050 ino_to_fsbo(fs, ip->i_number))->di_freelink); 12051 else 12052 DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data + 12053 ino_to_fsbo(fs, ip->i_number))->di_freelink); 12054 /* 12055 * If the effective link count is not equal to the actual link 12056 * count, then we must track the difference in an inodedep while 12057 * the inode is (potentially) tossed out of the cache. Otherwise, 12058 * if there is no existing inodedep, then there are no dependencies 12059 * to track. 12060 */ 12061 ACQUIRE_LOCK(ump); 12062again: 12063 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { 12064 FREE_LOCK(ump); 12065 if (ip->i_effnlink != ip->i_nlink) 12066 panic("softdep_update_inodeblock: bad link count"); 12067 return; 12068 } 12069 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) 12070 panic("softdep_update_inodeblock: bad delta"); 12071 /* 12072 * If we're flushing all dependencies we must also move any waiting 12073 * for journal writes onto the bufwait list prior to I/O. 12074 */ 12075 if (waitfor) { 12076 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12077 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12078 == DEPCOMPLETE) { 12079 jwait(&inoref->if_list, MNT_WAIT); 12080 goto again; 12081 } 12082 } 12083 } 12084 /* 12085 * Changes have been initiated. Anything depending on these 12086 * changes cannot occur until this inode has been written. 12087 */ 12088 inodedep->id_state &= ~COMPLETE; 12089 if ((inodedep->id_state & ONWORKLIST) == 0) 12090 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 12091 /* 12092 * Any new dependencies associated with the incore inode must 12093 * now be moved to the list associated with the buffer holding 12094 * the in-memory copy of the inode. Once merged process any 12095 * allocdirects that are completed by the merger. 12096 */ 12097 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); 12098 if (!TAILQ_EMPTY(&inodedep->id_inoupdt)) 12099 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt), 12100 NULL); 12101 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); 12102 if (!TAILQ_EMPTY(&inodedep->id_extupdt)) 12103 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt), 12104 NULL); 12105 /* 12106 * Now that the inode has been pushed into the buffer, the 12107 * operations dependent on the inode being written to disk 12108 * can be moved to the id_bufwait so that they will be 12109 * processed when the buffer I/O completes. 
 */
	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
	}
	/*
	 * Newly allocated inodes cannot be written until the bitmap
	 * that allocates them has been written (indicated by
	 * DEPCOMPLETE being set in id_state). If we are doing a
	 * forced sync (e.g., an fsync on a file), we force the bitmap
	 * to be written so that the update can be done.
	 */
	if (waitfor == 0) {
		FREE_LOCK(ump);
		return;
	}
retry:
	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
		FREE_LOCK(ump);
		return;
	}
	ibp = inodedep->id_bmsafemap->sm_buf;
	ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
	if (ibp == NULL) {
		/*
		 * If ibp came back as NULL, the dependency could have been
		 * freed while we slept.  Look it up again, and check to see
		 * that it has completed.
		 */
		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
			goto retry;
		FREE_LOCK(ump);
		return;
	}
	FREE_LOCK(ump);
	if ((error = bwrite(ibp)) != 0)
		softdep_error("softdep_update_inodeblock: bwrite", error);
}

/*
 * Merge a new inode dependency list (such as id_newinoupdt) into an
 * old inode dependency list (such as id_inoupdt). This routine must be
 * called with splbio interrupts blocked.
 */
static void
merge_inode_lists(newlisthead, oldlisthead)
	struct allocdirectlst *newlisthead;
	struct allocdirectlst *oldlisthead;
{
	struct allocdirect *listadp, *newadp;

	newadp = TAILQ_FIRST(newlisthead);
	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
		if (listadp->ad_offset < newadp->ad_offset) {
			listadp = TAILQ_NEXT(listadp, ad_next);
			continue;
		}
		TAILQ_REMOVE(newlisthead, newadp, ad_next);
		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
		if (listadp->ad_offset == newadp->ad_offset) {
			allocdirect_merge(oldlisthead, newadp,
			    listadp);
			listadp = newadp;
		}
		newadp = TAILQ_FIRST(newlisthead);
	}
	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
		TAILQ_REMOVE(newlisthead, newadp, ad_next);
		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
	}
}

/*
 * If we are doing an fsync, then we must ensure that any directory
 * entries for the inode have been written after the inode gets to disk.
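 *
 * (merge_inode_lists() above is an ordinary sorted-list merge: both
 * TAILQs stay ordered by ad_offset, each head of the new list is
 * inserted before the first old entry whose offset is not smaller,
 * equal offsets are collapsed through allocdirect_merge(), and any
 * leftover new entries are appended at the tail.)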
12185 */ 12186int 12187softdep_fsync(vp) 12188 struct vnode *vp; /* the "in_core" copy of the inode */ 12189{ 12190 struct inodedep *inodedep; 12191 struct pagedep *pagedep; 12192 struct inoref *inoref; 12193 struct ufsmount *ump; 12194 struct worklist *wk; 12195 struct diradd *dap; 12196 struct mount *mp; 12197 struct vnode *pvp; 12198 struct inode *ip; 12199 struct buf *bp; 12200 struct fs *fs; 12201 struct thread *td = curthread; 12202 int error, flushparent, pagedep_new_block; 12203 ino_t parentino; 12204 ufs_lbn_t lbn; 12205 12206 ip = VTOI(vp); 12207 fs = ip->i_fs; 12208 ump = ip->i_ump; 12209 mp = vp->v_mount; 12210 if (MOUNTEDSOFTDEP(mp) == 0) 12211 return (0); 12212 ACQUIRE_LOCK(ump); 12213restart: 12214 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { 12215 FREE_LOCK(ump); 12216 return (0); 12217 } 12218 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12219 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12220 == DEPCOMPLETE) { 12221 jwait(&inoref->if_list, MNT_WAIT); 12222 goto restart; 12223 } 12224 } 12225 if (!LIST_EMPTY(&inodedep->id_inowait) || 12226 !TAILQ_EMPTY(&inodedep->id_extupdt) || 12227 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 12228 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 12229 !TAILQ_EMPTY(&inodedep->id_newinoupdt)) 12230 panic("softdep_fsync: pending ops %p", inodedep); 12231 for (error = 0, flushparent = 0; ; ) { 12232 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 12233 break; 12234 if (wk->wk_type != D_DIRADD) 12235 panic("softdep_fsync: Unexpected type %s", 12236 TYPENAME(wk->wk_type)); 12237 dap = WK_DIRADD(wk); 12238 /* 12239 * Flush our parent if this directory entry has a MKDIR_PARENT 12240 * dependency or is contained in a newly allocated block. 12241 */ 12242 if (dap->da_state & DIRCHG) 12243 pagedep = dap->da_previous->dm_pagedep; 12244 else 12245 pagedep = dap->da_pagedep; 12246 parentino = pagedep->pd_ino; 12247 lbn = pagedep->pd_lbn; 12248 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) 12249 panic("softdep_fsync: dirty"); 12250 if ((dap->da_state & MKDIR_PARENT) || 12251 (pagedep->pd_state & NEWBLOCK)) 12252 flushparent = 1; 12253 else 12254 flushparent = 0; 12255 /* 12256 * If we are being fsync'ed as part of vgone'ing this vnode, 12257 * then we will not be able to release and recover the 12258 * vnode below, so we just have to give up on writing its 12259 * directory entry out. It will eventually be written, just 12260 * not now, but then the user was not asking to have it 12261 * written, so we are not breaking any promises. 12262 */ 12263 if (vp->v_iflag & VI_DOOMED) 12264 break; 12265 /* 12266 * We prevent deadlock by always fetching inodes from the 12267 * root, moving down the directory tree. Thus, when fetching 12268 * our parent directory, we first try to get the lock. If 12269 * that fails, we must unlock ourselves before requesting 12270 * the lock on our parent. See the comment in ufs_lookup 12271 * for details on possible races. 
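 *
 * Schematically, with hypothetical names, the back-off looks like:
 *
 *	if (trylock(parent) fails) {
 *		unlock(child);		// give up the out-of-order lock
 *		lock(parent);		// retake locks root to leaf
 *		relock(child);
 *		if (child was reclaimed while unlocked)
 *			bail out;	// revalidate after sleeping
 *	}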
12272 */ 12273 FREE_LOCK(ump); 12274 if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp, 12275 FFSV_FORCEINSMQ)) { 12276 error = vfs_busy(mp, MBF_NOWAIT); 12277 if (error != 0) { 12278 vfs_ref(mp); 12279 VOP_UNLOCK(vp, 0); 12280 error = vfs_busy(mp, 0); 12281 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12282 vfs_rel(mp); 12283 if (error != 0) 12284 return (ENOENT); 12285 if (vp->v_iflag & VI_DOOMED) { 12286 vfs_unbusy(mp); 12287 return (ENOENT); 12288 } 12289 } 12290 VOP_UNLOCK(vp, 0); 12291 error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, 12292 &pvp, FFSV_FORCEINSMQ); 12293 vfs_unbusy(mp); 12294 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12295 if (vp->v_iflag & VI_DOOMED) { 12296 if (error == 0) 12297 vput(pvp); 12298 error = ENOENT; 12299 } 12300 if (error != 0) 12301 return (error); 12302 } 12303 /* 12304 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 12305 * that are contained in direct blocks will be resolved by 12306 * doing a ffs_update. Pagedeps contained in indirect blocks 12307 * may require a complete sync'ing of the directory. So, we 12308 * try the cheap and fast ffs_update first, and if that fails, 12309 * then we do the slower ffs_syncvnode of the directory. 12310 */ 12311 if (flushparent) { 12312 int locked; 12313 12314 if ((error = ffs_update(pvp, 1)) != 0) { 12315 vput(pvp); 12316 return (error); 12317 } 12318 ACQUIRE_LOCK(ump); 12319 locked = 1; 12320 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) { 12321 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) { 12322 if (wk->wk_type != D_DIRADD) 12323 panic("softdep_fsync: Unexpected type %s", 12324 TYPENAME(wk->wk_type)); 12325 dap = WK_DIRADD(wk); 12326 if (dap->da_state & DIRCHG) 12327 pagedep = dap->da_previous->dm_pagedep; 12328 else 12329 pagedep = dap->da_pagedep; 12330 pagedep_new_block = pagedep->pd_state & NEWBLOCK; 12331 FREE_LOCK(ump); 12332 locked = 0; 12333 if (pagedep_new_block && (error = 12334 ffs_syncvnode(pvp, MNT_WAIT, 0))) { 12335 vput(pvp); 12336 return (error); 12337 } 12338 } 12339 } 12340 if (locked) 12341 FREE_LOCK(ump); 12342 } 12343 /* 12344 * Flush directory page containing the inode's name. 12345 */ 12346 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 12347 &bp); 12348 if (error == 0) 12349 error = bwrite(bp); 12350 else 12351 brelse(bp); 12352 vput(pvp); 12353 if (error != 0) 12354 return (error); 12355 ACQUIRE_LOCK(ump); 12356 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) 12357 break; 12358 } 12359 FREE_LOCK(ump); 12360 return (0); 12361} 12362 12363/* 12364 * Flush all the dirty bitmaps associated with the block device 12365 * before flushing the rest of the dirty blocks so as to reduce 12366 * the number of dependencies that will have to be rolled back. 12367 * 12368 * XXX Unused? 12369 */ 12370void 12371softdep_fsync_mountdev(vp) 12372 struct vnode *vp; 12373{ 12374 struct buf *bp, *nbp; 12375 struct worklist *wk; 12376 struct bufobj *bo; 12377 12378 if (!vn_isdisk(vp, NULL)) 12379 panic("softdep_fsync_mountdev: vnode not a disk"); 12380 bo = &vp->v_bufobj; 12381restart: 12382 BO_LOCK(bo); 12383 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 12384 /* 12385 * If it is already scheduled, skip to the next buffer. 12386 */ 12387 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) 12388 continue; 12389 12390 if ((bp->b_flags & B_DELWRI) == 0) 12391 panic("softdep_fsync_mountdev: not dirty"); 12392 /* 12393 * We are only interested in bitmaps with outstanding 12394 * dependencies. 
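 *
 * Because the bufobj lock is dropped to issue each write, the scan
 * below simply restarts from the head of the dirty list afterwards.
 * sync_cgs() further down avoids that rescan by keeping a sentinel
 * element in the list to remember its position across sleeps; the
 * idiom, with hypothetical names:
 *
 *	LIST_INSERT_HEAD(&head, &sentinel, link);
 *	while ((it = LIST_NEXT(&sentinel, link)) != NULL) {
 *		LIST_REMOVE(&sentinel, link);
 *		LIST_INSERT_AFTER(it, &sentinel, link);
 *		// may unlock, sleep and relock here; the sentinel
 *		// still marks how far the scan has progressed
 *	}
 *	LIST_REMOVE(&sentinel, link);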
 */
		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
		    wk->wk_type != D_BMSAFEMAP ||
		    (bp->b_vflags & BV_BKGRDINPROG)) {
			BUF_UNLOCK(bp);
			continue;
		}
		BO_UNLOCK(bo);
		bremfree(bp);
		(void) bawrite(bp);
		goto restart;
	}
	drain_output(vp);
	BO_UNLOCK(bo);
}

/*
 * Sync all cylinder groups that were dirty at the time this function is
 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
 * is used to flush freedep activity that may be holding up writes to an
 * indirect block.
 */
static int
sync_cgs(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct bmsafemap *bmsafemap;
	struct bmsafemap *sentinel;
	struct ufsmount *ump;
	struct buf *bp;
	int error;

	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
	sentinel->sm_cg = -1;
	ump = VFSTOUFS(mp);
	error = 0;
	ACQUIRE_LOCK(ump);
	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
		/* Skip sentinels and cgs with no work to release. */
		if (bmsafemap->sm_cg == -1 ||
		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
			LIST_REMOVE(sentinel, sm_next);
			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
			continue;
		}
		/*
		 * If we don't get the lock and we're waiting, try again;
		 * if not, move on to the next buf and try to sync it.
		 */
		bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
		if (bp == NULL && waitfor == MNT_WAIT)
			continue;
		LIST_REMOVE(sentinel, sm_next);
		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
		if (bp == NULL)
			continue;
		FREE_LOCK(ump);
		if (waitfor == MNT_NOWAIT)
			bawrite(bp);
		else
			error = bwrite(bp);
		ACQUIRE_LOCK(ump);
		if (error)
			break;
	}
	LIST_REMOVE(sentinel, sm_next);
	FREE_LOCK(ump);
	free(sentinel, M_BMSAFEMAP);
	return (error);
}

/*
 * This routine is called when we are trying to synchronously flush a
 * file. This routine must eliminate any filesystem metadata dependencies
 * so that the syncing routine can succeed.
 */
int
softdep_sync_metadata(struct vnode *vp)
{
	struct inode *ip;
	int error;

	ip = VTOI(vp);
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_sync_metadata called on non-softdep filesystem"));
	/*
	 * Ensure that any direct block dependencies have been cleared,
	 * truncations are started, and inode references are journaled.
	 */
	ACQUIRE_LOCK(ip->i_ump);
	/*
	 * Write all journal records to prevent rollbacks on devvp.
	 */
	if (vp->v_type == VCHR)
		softdep_flushjournal(vp->v_mount);
	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
	/*
	 * Ensure that all truncates are written so we won't find deps on
	 * indirect blocks.
	 */
	process_truncates(vp);
	FREE_LOCK(ip->i_ump);

	return (error);
}

/*
 * This routine is called when we are attempting to sync a buf with
 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
 * other IO it can but returns EBUSY if the buffer is not yet able to
 * be written.
Dependencies which will not cause rollbacks will always 12510 * return 0. 12511 */ 12512int 12513softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) 12514{ 12515 struct indirdep *indirdep; 12516 struct pagedep *pagedep; 12517 struct allocindir *aip; 12518 struct newblk *newblk; 12519 struct ufsmount *ump; 12520 struct buf *nbp; 12521 struct worklist *wk; 12522 int i, error; 12523 12524 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 12525 ("softdep_sync_buf called on non-softdep filesystem")); 12526 /* 12527 * For VCHR we just don't want to force flush any dependencies that 12528 * will cause rollbacks. 12529 */ 12530 if (vp->v_type == VCHR) { 12531 if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0)) 12532 return (EBUSY); 12533 return (0); 12534 } 12535 ump = VTOI(vp)->i_ump; 12536 ACQUIRE_LOCK(ump); 12537 /* 12538 * As we hold the buffer locked, none of its dependencies 12539 * will disappear. 12540 */ 12541 error = 0; 12542top: 12543 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 12544 switch (wk->wk_type) { 12545 12546 case D_ALLOCDIRECT: 12547 case D_ALLOCINDIR: 12548 newblk = WK_NEWBLK(wk); 12549 if (newblk->nb_jnewblk != NULL) { 12550 if (waitfor == MNT_NOWAIT) { 12551 error = EBUSY; 12552 goto out_unlock; 12553 } 12554 jwait(&newblk->nb_jnewblk->jn_list, waitfor); 12555 goto top; 12556 } 12557 if (newblk->nb_state & DEPCOMPLETE || 12558 waitfor == MNT_NOWAIT) 12559 continue; 12560 nbp = newblk->nb_bmsafemap->sm_buf; 12561 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12562 if (nbp == NULL) 12563 goto top; 12564 FREE_LOCK(ump); 12565 if ((error = bwrite(nbp)) != 0) 12566 goto out; 12567 ACQUIRE_LOCK(ump); 12568 continue; 12569 12570 case D_INDIRDEP: 12571 indirdep = WK_INDIRDEP(wk); 12572 if (waitfor == MNT_NOWAIT) { 12573 if (!TAILQ_EMPTY(&indirdep->ir_trunc) || 12574 !LIST_EMPTY(&indirdep->ir_deplisthd)) { 12575 error = EBUSY; 12576 goto out_unlock; 12577 } 12578 } 12579 if (!TAILQ_EMPTY(&indirdep->ir_trunc)) 12580 panic("softdep_sync_buf: truncation pending."); 12581 restart: 12582 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 12583 newblk = (struct newblk *)aip; 12584 if (newblk->nb_jnewblk != NULL) { 12585 jwait(&newblk->nb_jnewblk->jn_list, 12586 waitfor); 12587 goto restart; 12588 } 12589 if (newblk->nb_state & DEPCOMPLETE) 12590 continue; 12591 nbp = newblk->nb_bmsafemap->sm_buf; 12592 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12593 if (nbp == NULL) 12594 goto restart; 12595 FREE_LOCK(ump); 12596 if ((error = bwrite(nbp)) != 0) 12597 goto out; 12598 ACQUIRE_LOCK(ump); 12599 goto restart; 12600 } 12601 continue; 12602 12603 case D_PAGEDEP: 12604 /* 12605 * Only flush directory entries in synchronous passes. 12606 */ 12607 if (waitfor != MNT_WAIT) { 12608 error = EBUSY; 12609 goto out_unlock; 12610 } 12611 /* 12612 * While syncing snapshots, we must allow recursive 12613 * lookups. 12614 */ 12615 BUF_AREC(bp); 12616 /* 12617 * We are trying to sync a directory that may 12618 * have dependencies on both its own metadata 12619 * and/or dependencies on the inodes of any 12620 * recently allocated files. We walk its diradd 12621 * lists pushing out the associated inode. 
12622 */
12623 pagedep = WK_PAGEDEP(wk);
12624 for (i = 0; i < DAHASHSZ; i++) {
12625 if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
12626 continue;
12627 if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12628 &pagedep->pd_diraddhd[i]))) {
12629 BUF_NOREC(bp);
12630 goto out_unlock;
12631 }
12632 }
12633 BUF_NOREC(bp);
12634 continue;
12635
12636 case D_FREEWORK:
12637 case D_FREEDEP:
12638 case D_JSEGDEP:
12639 case D_JNEWBLK:
12640 continue;
12641
12642 default:
12643 panic("softdep_sync_buf: Unknown type %s",
12644 TYPENAME(wk->wk_type));
12645 /* NOTREACHED */
12646 }
12647 }
12648out_unlock:
12649 FREE_LOCK(ump);
12650out:
12651 return (error);
12652}
12653
12654/*
12655 * Flush the dependencies associated with an inodedep.
12656 * Called with splbio blocked.
12657 */
12658static int
12659flush_inodedep_deps(vp, mp, ino)
12660 struct vnode *vp;
12661 struct mount *mp;
12662 ino_t ino;
12663{
12664 struct inodedep *inodedep;
12665 struct inoref *inoref;
12666 struct ufsmount *ump;
12667 int error, waitfor;
12668
12669 /*
12670 * This work is done in two passes. The first pass grabs most
12671 * of the buffers and begins asynchronously writing them. The
12672 * only way to wait for these asynchronous writes is to sleep
12673 * on the filesystem vnode which may stay busy for a long time
12674 * if the filesystem is active. So, instead, we make a second
12675 * pass over the dependencies blocking on each write. In the
12676 * usual case we will be blocking against a write that we
12677 * initiated, so when it is done the dependency will have been
12678 * resolved. Thus the second pass is expected to end quickly.
12679 * We give a brief window at the top of the loop to allow
12680 * any pending I/O to complete.
12681 */
12682 ump = VFSTOUFS(mp);
12683 LOCK_OWNED(ump);
12684 for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12685 if (error)
12686 return (error);
12687 FREE_LOCK(ump);
12688 ACQUIRE_LOCK(ump);
12689restart:
12690 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12691 return (0);
12692 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12693 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12694 == DEPCOMPLETE) {
12695 jwait(&inoref->if_list, MNT_WAIT);
12696 goto restart;
12697 }
12698 }
12699 if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12700 flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12701 flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12702 flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12703 continue;
12704 /*
12705 * If this was pass 2, we are done; otherwise begin pass 2.
12706 */
12707 if (waitfor == MNT_WAIT)
12708 break;
12709 waitfor = MNT_WAIT;
12710 }
12711 /*
12712 * Try freeing inodedep in case all dependencies have been removed.
12713 */
12714 if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12715 (void) free_inodedep(inodedep);
12716 return (0);
12717}
12718
12719/*
12720 * Flush an inode dependency list.
12721 * Called with splbio blocked.
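 * ("splbio" is historical usage; the precondition actually enforced
 * below is that the per-mount softdep lock is held, i.e. callers do
 *
 *	ACQUIRE_LOCK(ump);
 *	... flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ...
 *	FREE_LOCK(ump);
 *
 * and the routine may drop and retake that lock around bwrite().)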
12722 */ 12723static int 12724flush_deplist(listhead, waitfor, errorp) 12725 struct allocdirectlst *listhead; 12726 int waitfor; 12727 int *errorp; 12728{ 12729 struct allocdirect *adp; 12730 struct newblk *newblk; 12731 struct ufsmount *ump; 12732 struct buf *bp; 12733 12734 if ((adp = TAILQ_FIRST(listhead)) == NULL) 12735 return (0); 12736 ump = VFSTOUFS(adp->ad_list.wk_mp); 12737 LOCK_OWNED(ump); 12738 TAILQ_FOREACH(adp, listhead, ad_next) { 12739 newblk = (struct newblk *)adp; 12740 if (newblk->nb_jnewblk != NULL) { 12741 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 12742 return (1); 12743 } 12744 if (newblk->nb_state & DEPCOMPLETE) 12745 continue; 12746 bp = newblk->nb_bmsafemap->sm_buf; 12747 bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor); 12748 if (bp == NULL) { 12749 if (waitfor == MNT_NOWAIT) 12750 continue; 12751 return (1); 12752 } 12753 FREE_LOCK(ump); 12754 if (waitfor == MNT_NOWAIT) 12755 bawrite(bp); 12756 else 12757 *errorp = bwrite(bp); 12758 ACQUIRE_LOCK(ump); 12759 return (1); 12760 } 12761 return (0); 12762} 12763 12764/* 12765 * Flush dependencies associated with an allocdirect block. 12766 */ 12767static int 12768flush_newblk_dep(vp, mp, lbn) 12769 struct vnode *vp; 12770 struct mount *mp; 12771 ufs_lbn_t lbn; 12772{ 12773 struct newblk *newblk; 12774 struct ufsmount *ump; 12775 struct bufobj *bo; 12776 struct inode *ip; 12777 struct buf *bp; 12778 ufs2_daddr_t blkno; 12779 int error; 12780 12781 error = 0; 12782 bo = &vp->v_bufobj; 12783 ip = VTOI(vp); 12784 blkno = DIP(ip, i_db[lbn]); 12785 if (blkno == 0) 12786 panic("flush_newblk_dep: Missing block"); 12787 ump = VFSTOUFS(mp); 12788 ACQUIRE_LOCK(ump); 12789 /* 12790 * Loop until all dependencies related to this block are satisfied. 12791 * We must be careful to restart after each sleep in case a write 12792 * completes some part of this process for us. 12793 */ 12794 for (;;) { 12795 if (newblk_lookup(mp, blkno, 0, &newblk) == 0) { 12796 FREE_LOCK(ump); 12797 break; 12798 } 12799 if (newblk->nb_list.wk_type != D_ALLOCDIRECT) 12800 panic("flush_newblk_deps: Bad newblk %p", newblk); 12801 /* 12802 * Flush the journal. 12803 */ 12804 if (newblk->nb_jnewblk != NULL) { 12805 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 12806 continue; 12807 } 12808 /* 12809 * Write the bitmap dependency. 12810 */ 12811 if ((newblk->nb_state & DEPCOMPLETE) == 0) { 12812 bp = newblk->nb_bmsafemap->sm_buf; 12813 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); 12814 if (bp == NULL) 12815 continue; 12816 FREE_LOCK(ump); 12817 error = bwrite(bp); 12818 if (error) 12819 break; 12820 ACQUIRE_LOCK(ump); 12821 continue; 12822 } 12823 /* 12824 * Write the buffer. 12825 */ 12826 FREE_LOCK(ump); 12827 BO_LOCK(bo); 12828 bp = gbincore(bo, lbn); 12829 if (bp != NULL) { 12830 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 12831 LK_INTERLOCK, BO_LOCKPTR(bo)); 12832 if (error == ENOLCK) { 12833 ACQUIRE_LOCK(ump); 12834 continue; /* Slept, retry */ 12835 } 12836 if (error != 0) 12837 break; /* Failed */ 12838 if (bp->b_flags & B_DELWRI) { 12839 bremfree(bp); 12840 error = bwrite(bp); 12841 if (error) 12842 break; 12843 } else 12844 BUF_UNLOCK(bp); 12845 } else 12846 BO_UNLOCK(bo); 12847 /* 12848 * We have to wait for the direct pointers to 12849 * point at the newdirblk before the dependency 12850 * will go away. 12851 */ 12852 error = ffs_update(vp, 1); 12853 if (error) 12854 break; 12855 ACQUIRE_LOCK(ump); 12856 } 12857 return (error); 12858} 12859 12860/* 12861 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 
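 * The diradds are retired in dependency order: a MKDIR_PARENT entry
 * first pushes the parent directory itself out with ffs_update(), a
 * MKDIR_BODY entry forces the new directory's "." and ".." block to
 * disk via flush_newblk_dep(), and only then is the inode named by
 * the entry flushed with its updated link count.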
12862 * Called with splbio blocked.
12863 */
12864static int
12865flush_pagedep_deps(pvp, mp, diraddhdp)
12866 struct vnode *pvp;
12867 struct mount *mp;
12868 struct diraddhd *diraddhdp;
12869{
12870 struct inodedep *inodedep;
12871 struct inoref *inoref;
12872 struct ufsmount *ump;
12873 struct diradd *dap;
12874 struct vnode *vp;
12875 int error = 0;
12876 struct buf *bp;
12877 ino_t inum;
12878 struct diraddhd unfinished;
12879
12880 LIST_INIT(&unfinished);
12881 ump = VFSTOUFS(mp);
12882 LOCK_OWNED(ump);
12883restart:
12884 while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12885 /*
12886 * Flush ourselves if this directory entry
12887 * has a MKDIR_PARENT dependency.
12888 */
12889 if (dap->da_state & MKDIR_PARENT) {
12890 FREE_LOCK(ump);
12891 if ((error = ffs_update(pvp, 1)) != 0)
12892 break;
12893 ACQUIRE_LOCK(ump);
12894 /*
12895 * If that cleared dependencies, go on to next.
12896 */
12897 if (dap != LIST_FIRST(diraddhdp))
12898 continue;
12899 /*
12900 * All MKDIR_PARENT dependencies and all the
12901 * NEWBLOCK pagedeps that are contained in direct
12902 * blocks were resolved by doing the above ffs_update.
12903 * Pagedeps contained in indirect blocks may
12904 * require a complete sync'ing of the directory.
12905 * We are in the midst of doing a complete sync,
12906 * so if they are not resolved in this pass we
12907 * defer them for now as they will be sync'ed by
12908 * our caller shortly.
12909 */
12910 LIST_REMOVE(dap, da_pdlist);
12911 LIST_INSERT_HEAD(&unfinished, dap, da_pdlist);
12912 continue;
12913 }
12914 /*
12915 * A newly allocated directory must have its "." and
12916 * ".." entries written out before its name can be
12917 * committed in its parent.
12918 */
12919 inum = dap->da_newinum;
12920 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12921 panic("flush_pagedep_deps: lost inode1");
12922 /*
12923 * Wait for any pending journal adds to complete so we don't
12924 * cause rollbacks while syncing.
12925 */
12926 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12927 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12928 == DEPCOMPLETE) {
12929 jwait(&inoref->if_list, MNT_WAIT);
12930 goto restart;
12931 }
12932 }
12933 if (dap->da_state & MKDIR_BODY) {
12934 FREE_LOCK(ump);
12935 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12936 FFSV_FORCEINSMQ)))
12937 break;
12938 error = flush_newblk_dep(vp, mp, 0);
12939 /*
12940 * If we still have the dependency we might need to
12941 * update the vnode to sync the new link count to
12942 * disk.
12943 */
12944 if (error == 0 && dap == LIST_FIRST(diraddhdp))
12945 error = ffs_update(vp, 1);
12946 vput(vp);
12947 if (error != 0)
12948 break;
12949 ACQUIRE_LOCK(ump);
12950 /*
12951 * If that cleared dependencies, go on to next.
12952 */
12953 if (dap != LIST_FIRST(diraddhdp))
12954 continue;
12955 if (dap->da_state & MKDIR_BODY) {
12956 inodedep_lookup(UFSTOVFS(ump), inum, 0,
12957 &inodedep);
12958 panic("flush_pagedep_deps: MKDIR_BODY "
12959 "inodedep %p dap %p vp %p",
12960 inodedep, dap, vp);
12961 }
12962 }
12963 /*
12964 * Flush the inode on which the directory entry depends.
12965 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12966 * the only remaining dependency is that the updated inode
12967 * count must get pushed to disk. The inode has already
12968 * been pushed into its inode buffer (via VOP_UPDATE) at
12969 * the time of the reference count change.
So we need only
12970 * locate that buffer, ensure that there will be no rollback
12971 * caused by a bitmap dependency, then write the inode buffer.
12972 */
12973retry:
12974 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12975 panic("flush_pagedep_deps: lost inode");
12976 /*
12977 * If the inode still has bitmap dependencies,
12978 * push them to disk.
12979 */
12980 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12981 bp = inodedep->id_bmsafemap->sm_buf;
12982 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12983 if (bp == NULL)
12984 goto retry;
12985 FREE_LOCK(ump);
12986 if ((error = bwrite(bp)) != 0)
12987 break;
12988 ACQUIRE_LOCK(ump);
12989 if (dap != LIST_FIRST(diraddhdp))
12990 continue;
12991 }
12992 /*
12993 * If the inode is still sitting in a buffer waiting
12994 * to be written or waiting for the link count to be
12995 * adjusted, update it here to flush it to disk.
12996 */
12997 if (dap == LIST_FIRST(diraddhdp)) {
12998 FREE_LOCK(ump);
12999 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
13000 FFSV_FORCEINSMQ)))
13001 break;
13002 error = ffs_update(vp, 1);
13003 vput(vp);
13004 if (error)
13005 break;
13006 ACQUIRE_LOCK(ump);
13007 }
13008 /*
13009 * If we have failed to get rid of all the dependencies
13010 * then something is seriously wrong.
13011 */
13012 if (dap == LIST_FIRST(diraddhdp)) {
13013 inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13014 panic("flush_pagedep_deps: failed to flush "
13015 "inodedep %p ino %ju dap %p",
13016 inodedep, (uintmax_t)inum, dap);
13017 }
13018 }
13019 if (error)
13020 ACQUIRE_LOCK(ump);
13021 while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13022 LIST_REMOVE(dap, da_pdlist);
13023 LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13024 }
13025 return (error);
13026}
13027
13028/*
13029 * A large burst of file addition or deletion activity can drive the
13030 * memory load excessively high. First attempt to slow things down
13031 * using the techniques below. If that fails, this routine requests
13032 * the offending operations to fall back to running synchronously
13033 * until the memory load returns to a reasonable level.
13034 */
13035int
13036softdep_slowdown(vp)
13037 struct vnode *vp;
13038{
13039 struct ufsmount *ump;
13040 int jlow;
13041 int max_softdeps_hard;
13042
13043 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13044 ("softdep_slowdown called on non-softdep filesystem"));
13045 ump = VFSTOUFS(vp->v_mount);
13046 ACQUIRE_LOCK(ump);
13047 jlow = 0;
13048 /*
13049 * Check for journal space if needed.
13050 */
13051 if (DOINGSUJ(vp)) {
13052 if (journal_space(ump, 0) == 0)
13053 jlow = 1;
13054 }
13055 /*
13056 * If the system is under its limits and our filesystem is
13057 * not responsible for more than our share of the usage and
13058 * we are not low on journal space, then no need to slow down.
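 * As a worked example, with illustrative numbers only: if
 * max_softdeps were 100000, max_softdeps_hard below would be
 * 110000, giving global trip points of 55000 dirrems, 110000
 * inodedeps, 110 indirdeps, and 110000 freeblks; with
 * stat_flush_threads at 2, each filesystem would be allowed half
 * of each of those amounts before being slowed down.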
13059 */
13060 max_softdeps_hard = max_softdeps * 11 / 10;
13061 if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13062 dep_current[D_INODEDEP] < max_softdeps_hard &&
13063 dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13064 dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13065 ump->softdep_curdeps[D_DIRREM] <
13066 (max_softdeps_hard / 2) / stat_flush_threads &&
13067 ump->softdep_curdeps[D_INODEDEP] <
13068 max_softdeps_hard / stat_flush_threads &&
13069 ump->softdep_curdeps[D_INDIRDEP] <
13070 (max_softdeps_hard / 1000) / stat_flush_threads &&
13071 ump->softdep_curdeps[D_FREEBLKS] <
13072 max_softdeps_hard / stat_flush_threads) {
13073 FREE_LOCK(ump);
13074 return (0);
13075 }
13076 /*
13077 * If the journal is low or our filesystem is over its limit
13078 * then speed up the cleanup.
13079 */
13080 if (ump->softdep_curdeps[D_INDIRDEP] <
13081 (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13082 softdep_speedup(ump);
13083 stat_sync_limit_hit += 1;
13084 FREE_LOCK(ump);
13085 /*
13086 * We only slow down the rate at which new dependencies are
13087 * generated if we are not using journaling. With journaling,
13088 * the cleanup should always be sufficient to keep things
13089 * under control.
13090 */
13091 if (DOINGSUJ(vp))
13092 return (0);
13093 return (1);
13094}
13095
13096/*
13097 * Called by the allocation routines when they are about to fail
13098 * in the hope that we can free up the requested resource (inodes
13099 * or disk space).
13100 *
13101 * First check to see if the work list has anything on it. If it has,
13102 * clean up entries until we successfully free the requested resource.
13103 * Because this process holds inodes locked, we cannot handle any remove
13104 * requests that might block on a locked inode as that could lead to
13105 * deadlock. If the worklist yields none of the requested resource,
13106 * start syncing out vnodes to free up the needed space.
13107 */
13108int
13109softdep_request_cleanup(fs, vp, cred, resource)
13110 struct fs *fs;
13111 struct vnode *vp;
13112 struct ucred *cred;
13113 int resource;
13114{
13115 struct ufsmount *ump;
13116 struct mount *mp;
13117 struct vnode *lvp, *mvp;
13118 long starttime;
13119 ufs2_daddr_t needed;
13120 int error;
13121
13122 /*
13123 * If we are being called because of a process doing a
13124 * copy-on-write, then it is not safe to process any
13125 * worklist items as we will recurse into the copyonwrite
13126 * routine. This will result in an incoherent snapshot.
13127 * If the vnode that we hold is a snapshot, we must avoid
13128 * handling other resources that could cause deadlock.
13129 */
13130 if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13131 return (0);
13132
13133 if (resource == FLUSH_BLOCKS_WAIT)
13134 stat_cleanup_blkrequests += 1;
13135 else
13136 stat_cleanup_inorequests += 1;
13137
13138 mp = vp->v_mount;
13139 ump = VFSTOUFS(mp);
13140 mtx_assert(UFS_MTX(ump), MA_OWNED);
13141 UFS_UNLOCK(ump);
13142 error = ffs_update(vp, 1);
13143 if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13144 UFS_LOCK(ump);
13145 return (0);
13146 }
13147 /*
13148 * If we are in need of resources, start by cleaning up
13149 * any block removals associated with our inode.
13150 */
13151 ACQUIRE_LOCK(ump);
13152 process_removes(vp);
13153 process_truncates(vp);
13154 FREE_LOCK(ump);
13155 /*
13156 * Now clean up at least as many resources as we will need.
13157 *
13158 * When requested to clean up inodes, the number that are needed
13159 * is set by the number of simultaneous writers (mnt_writeopcount)
13160 * plus a bit of slop (2) in case some more writers show up while
13161 * we are cleaning.
13162 *
13163 * When requested to free up space, the amount of space that
13164 * we need is enough blocks to allocate a full-sized segment
13165 * (fs_contigsumsize). The number of such segments that will
13166 * be needed is set by the number of simultaneous writers
13167 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13168 * writers show up while we are cleaning.
13169 *
13170 * Additionally, if we are unprivileged and allocating space,
13171 * we need to ensure that we clean up enough blocks to get the
13172 * needed number of blocks over the threshold of the minimum
13173 * number of blocks required to be kept free by the filesystem
13174 * (fs_minfree).
13175 */
13176 if (resource == FLUSH_INODES_WAIT) {
13177 needed = vp->v_mount->mnt_writeopcount + 2;
13178 } else if (resource == FLUSH_BLOCKS_WAIT) {
13179 needed = (vp->v_mount->mnt_writeopcount + 2) *
13180 fs->fs_contigsumsize;
13181 if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
13182 needed += fragstoblks(fs,
13183 roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13184 fs->fs_cstotal.cs_nffree, fs->fs_frag));
13185 } else {
13186 UFS_LOCK(ump);
13187 printf("softdep_request_cleanup: Unknown resource type %d\n",
13188 resource);
13189 return (0);
13190 }
13191 starttime = time_second;
13192retry:
13193 if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13194 fs->fs_cstotal.cs_nbfree <= needed) ||
13195 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13196 fs->fs_cstotal.cs_nifree <= needed)) {
13197 ACQUIRE_LOCK(ump);
13198 if (ump->softdep_on_worklist > 0 &&
13199 process_worklist_item(UFSTOVFS(ump),
13200 ump->softdep_on_worklist, LK_NOWAIT) != 0)
13201 stat_worklist_push += 1;
13202 FREE_LOCK(ump);
13203 }
13204 /*
13205 * If we still need resources and there are no more worklist
13206 * entries to process to obtain them, we have to start flushing
13207 * the dirty vnodes to force the release of additional requests
13208 * to the worklist that we can then process to reap additional
13209 * resources. We walk the vnodes associated with the mount point
13210 * until we get the needed worklist requests that we can reap.
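 * As an illustration (the values are hypothetical): with four
 * concurrent writers and fs_contigsumsize of 16, a
 * FLUSH_BLOCKS_WAIT request computes needed = (4 + 2) * 16 = 96
 * blocks, and an unprivileged caller must additionally make up any
 * shortfall against the fs_minfree reserve.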
13211 */
13212 if ((resource == FLUSH_BLOCKS_WAIT &&
13213 fs->fs_cstotal.cs_nbfree <= needed) ||
13214 (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13215 fs->fs_cstotal.cs_nifree <= needed)) {
13216 MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13217 if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
13218 VI_UNLOCK(lvp);
13219 continue;
13220 }
13221 if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
13222 curthread))
13223 continue;
13224 if (lvp->v_vflag & VV_NOSYNC) { /* unlinked */
13225 vput(lvp);
13226 continue;
13227 }
13228 (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13229 vput(lvp);
13230 }
13231 lvp = ump->um_devvp;
13232 if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13233 VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
13234 VOP_UNLOCK(lvp, 0);
13235 }
13236 if (ump->softdep_on_worklist > 0) {
13237 stat_cleanup_retries += 1;
13238 goto retry;
13239 }
13240 stat_cleanup_failures += 1;
13241 }
13242 if (time_second - starttime > stat_cleanup_high_delay)
13243 stat_cleanup_high_delay = time_second - starttime;
13244 UFS_LOCK(ump);
13245 return (1);
13246}
13247
13248static bool
13249softdep_excess_inodes(struct ufsmount *ump)
13250{
13251
13252 return (dep_current[D_INODEDEP] > max_softdeps &&
13253 ump->softdep_curdeps[D_INODEDEP] > max_softdeps /
13254 stat_flush_threads);
13255}
13256
13257static bool
13258softdep_excess_dirrem(struct ufsmount *ump)
13259{
13260
13261 return (dep_current[D_DIRREM] > max_softdeps / 2 &&
13262 ump->softdep_curdeps[D_DIRREM] > (max_softdeps / 2) /
13263 stat_flush_threads);
13264}
13265
13266static void
13267schedule_cleanup(struct mount *mp)
13268{
13269 struct ufsmount *ump;
13270 struct thread *td;
13271
13272 ump = VFSTOUFS(mp);
13273 LOCK_OWNED(ump);
13274 FREE_LOCK(ump);
13275 td = curthread;
13276 if ((td->td_pflags & TDP_KTHREAD) != 0 &&
13277 (td->td_proc->p_flag2 & P2_AST_SU) == 0) {
13278 /*
13279 * No AST is delivered to kernel threads, so nobody
13280 * would deref the mp. Some kernel threads
13281 * explicitly check for AST, e.g. the NFS daemon does
13282 * this in its serving loop.
13283 */
13284 return;
13285 }
13286 if (td->td_su != NULL)
13287 vfs_rel(td->td_su);
13288 vfs_ref(mp);
13289 td->td_su = mp;
13290 thread_lock(td);
13291 td->td_flags |= TDF_ASTPENDING;
13292 thread_unlock(td);
13293}
13294
13295static void
13296softdep_ast_cleanup_proc(void)
13297{
13298 struct thread *td;
13299 struct mount *mp;
13300 struct ufsmount *ump;
13301 int error;
13302 bool req;
13303
13304 td = curthread;
13305 mp = td->td_su;
13306 if (mp == NULL)
13307 return;
13308 td->td_su = NULL;
13309 error = vfs_busy(mp, MBF_NOWAIT);
13310 vfs_rel(mp);
13311 if (error != 0)
13312 return;
13313 if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) {
13314 ump = VFSTOUFS(mp);
13315 for (;;) {
13316 req = false;
13317 ACQUIRE_LOCK(ump);
13318 if (softdep_excess_inodes(ump)) {
13319 req = true;
13320 request_cleanup(mp, FLUSH_INODES);
13321 }
13322 if (softdep_excess_dirrem(ump)) {
13323 req = true;
13324 request_cleanup(mp, FLUSH_BLOCKS);
13325 }
13326 FREE_LOCK(ump);
13327 if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
13328 break;
13329 }
13330 }
13331 vfs_unbusy(mp);
13332}
13333
13334/*
13335 * If memory utilization has gotten too high, deliberately slow things
13336 * down and speed up the I/O processing.
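 * The throttle below is two-tiered: a caller may first be co-opted
 * to process a couple of worklist items itself; failing that, it
 * bumps req_clear_inodedeps or req_clear_remove for the syncer and
 * naps for roughly tickdelay ticks while the daemons catch up.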
13337 */ 13338static int 13339request_cleanup(mp, resource) 13340 struct mount *mp; 13341 int resource; 13342{ 13343 struct thread *td = curthread; 13344 struct ufsmount *ump; 13345 13346 ump = VFSTOUFS(mp); 13347 LOCK_OWNED(ump); 13348 /* 13349 * We never hold up the filesystem syncer or buf daemon. 13350 */ 13351 if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF)) 13352 return (0); 13353 /* 13354 * First check to see if the work list has gotten backlogged. 13355 * If it has, co-opt this process to help clean up two entries. 13356 * Because this process may hold inodes locked, we cannot 13357 * handle any remove requests that might block on a locked 13358 * inode as that could lead to deadlock. We set TDP_SOFTDEP 13359 * to avoid recursively processing the worklist. 13360 */ 13361 if (ump->softdep_on_worklist > max_softdeps / 10) { 13362 td->td_pflags |= TDP_SOFTDEP; 13363 process_worklist_item(mp, 2, LK_NOWAIT); 13364 td->td_pflags &= ~TDP_SOFTDEP; 13365 stat_worklist_push += 2; 13366 return(1); 13367 } 13368 /* 13369 * Next, we attempt to speed up the syncer process. If that 13370 * is successful, then we allow the process to continue. 13371 */ 13372 if (softdep_speedup(ump) && 13373 resource != FLUSH_BLOCKS_WAIT && 13374 resource != FLUSH_INODES_WAIT) 13375 return(0); 13376 /* 13377 * If we are resource constrained on inode dependencies, try 13378 * flushing some dirty inodes. Otherwise, we are constrained 13379 * by file deletions, so try accelerating flushes of directories 13380 * with removal dependencies. We would like to do the cleanup 13381 * here, but we probably hold an inode locked at this point and 13382 * that might deadlock against one that we try to clean. So, 13383 * the best that we can do is request the syncer daemon to do 13384 * the cleanup for us. 13385 */ 13386 switch (resource) { 13387 13388 case FLUSH_INODES: 13389 case FLUSH_INODES_WAIT: 13390 ACQUIRE_GBLLOCK(&lk); 13391 stat_ino_limit_push += 1; 13392 req_clear_inodedeps += 1; 13393 FREE_GBLLOCK(&lk); 13394 stat_countp = &stat_ino_limit_hit; 13395 break; 13396 13397 case FLUSH_BLOCKS: 13398 case FLUSH_BLOCKS_WAIT: 13399 ACQUIRE_GBLLOCK(&lk); 13400 stat_blk_limit_push += 1; 13401 req_clear_remove += 1; 13402 FREE_GBLLOCK(&lk); 13403 stat_countp = &stat_blk_limit_hit; 13404 break; 13405 13406 default: 13407 panic("request_cleanup: unknown type"); 13408 } 13409 /* 13410 * Hopefully the syncer daemon will catch up and awaken us. 13411 * We wait at most tickdelay before proceeding in any case. 13412 */ 13413 ACQUIRE_GBLLOCK(&lk); 13414 FREE_LOCK(ump); 13415 proc_waiting += 1; 13416 if (callout_pending(&softdep_callout) == FALSE) 13417 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2, 13418 pause_timer, 0); 13419 13420 if ((td->td_pflags & TDP_KTHREAD) == 0) 13421 msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0); 13422 proc_waiting -= 1; 13423 FREE_GBLLOCK(&lk); 13424 ACQUIRE_LOCK(ump); 13425 return (1); 13426} 13427 13428/* 13429 * Awaken processes pausing in request_cleanup and clear proc_waiting 13430 * to indicate that there is no longer a timer running. Pause_timer 13431 * will be called with the global softdep mutex (&lk) locked. 13432 */ 13433static void 13434pause_timer(arg) 13435 void *arg; 13436{ 13437 13438 GBLLOCK_OWNED(&lk); 13439 /* 13440 * The callout_ API has acquired mtx and will hold it around this 13441 * function call. 
13442 */ 13443 *stat_countp += proc_waiting; 13444 wakeup(&proc_waiting); 13445} 13446 13447/* 13448 * If requested, try removing inode or removal dependencies. 13449 */ 13450static void 13451check_clear_deps(mp) 13452 struct mount *mp; 13453{ 13454 13455 /* 13456 * If we are suspended, it may be because of our using 13457 * too many inodedeps, so help clear them out. 13458 */ 13459 if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended) 13460 clear_inodedeps(mp); 13461 /* 13462 * General requests for cleanup of backed up dependencies 13463 */ 13464 ACQUIRE_GBLLOCK(&lk); 13465 if (req_clear_inodedeps) { 13466 req_clear_inodedeps -= 1; 13467 FREE_GBLLOCK(&lk); 13468 clear_inodedeps(mp); 13469 ACQUIRE_GBLLOCK(&lk); 13470 wakeup(&proc_waiting); 13471 } 13472 if (req_clear_remove) { 13473 req_clear_remove -= 1; 13474 FREE_GBLLOCK(&lk); 13475 clear_remove(mp); 13476 ACQUIRE_GBLLOCK(&lk); 13477 wakeup(&proc_waiting); 13478 } 13479 FREE_GBLLOCK(&lk); 13480} 13481 13482/* 13483 * Flush out a directory with at least one removal dependency in an effort to 13484 * reduce the number of dirrem, freefile, and freeblks dependency structures. 13485 */ 13486static void 13487clear_remove(mp) 13488 struct mount *mp; 13489{ 13490 struct pagedep_hashhead *pagedephd; 13491 struct pagedep *pagedep; 13492 struct ufsmount *ump; 13493 struct vnode *vp; 13494 struct bufobj *bo; 13495 int error, cnt; 13496 ino_t ino; 13497 13498 ump = VFSTOUFS(mp); 13499 LOCK_OWNED(ump); 13500 13501 for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) { 13502 pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++]; 13503 if (ump->pagedep_nextclean > ump->pagedep_hash_size) 13504 ump->pagedep_nextclean = 0; 13505 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 13506 if (LIST_EMPTY(&pagedep->pd_dirremhd)) 13507 continue; 13508 ino = pagedep->pd_ino; 13509 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13510 continue; 13511 FREE_LOCK(ump); 13512 13513 /* 13514 * Let unmount clear deps 13515 */ 13516 error = vfs_busy(mp, MBF_NOWAIT); 13517 if (error != 0) 13518 goto finish_write; 13519 error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13520 FFSV_FORCEINSMQ); 13521 vfs_unbusy(mp); 13522 if (error != 0) { 13523 softdep_error("clear_remove: vget", error); 13524 goto finish_write; 13525 } 13526 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13527 softdep_error("clear_remove: fsync", error); 13528 bo = &vp->v_bufobj; 13529 BO_LOCK(bo); 13530 drain_output(vp); 13531 BO_UNLOCK(bo); 13532 vput(vp); 13533 finish_write: 13534 vn_finished_write(mp); 13535 ACQUIRE_LOCK(ump); 13536 return; 13537 } 13538 } 13539} 13540 13541/* 13542 * Clear out a block of dirty inodes in an effort to reduce 13543 * the number of inodedep dependency structures. 13544 */ 13545static void 13546clear_inodedeps(mp) 13547 struct mount *mp; 13548{ 13549 struct inodedep_hashhead *inodedephd; 13550 struct inodedep *inodedep; 13551 struct ufsmount *ump; 13552 struct vnode *vp; 13553 struct fs *fs; 13554 int error, cnt; 13555 ino_t firstino, lastino, ino; 13556 13557 ump = VFSTOUFS(mp); 13558 fs = ump->um_fs; 13559 LOCK_OWNED(ump); 13560 /* 13561 * Pick a random inode dependency to be cleared. 13562 * We will then gather up all the inodes in its block 13563 * that have dependencies and flush them out. 
13564 */ 13565 for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) { 13566 inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++]; 13567 if (ump->inodedep_nextclean > ump->inodedep_hash_size) 13568 ump->inodedep_nextclean = 0; 13569 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 13570 break; 13571 } 13572 if (inodedep == NULL) 13573 return; 13574 /* 13575 * Find the last inode in the block with dependencies. 13576 */ 13577 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 13578 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 13579 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0) 13580 break; 13581 /* 13582 * Asynchronously push all but the last inode with dependencies. 13583 * Synchronously push the last inode with dependencies to ensure 13584 * that the inode block gets written to free up the inodedeps. 13585 */ 13586 for (ino = firstino; ino <= lastino; ino++) { 13587 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) 13588 continue; 13589 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13590 continue; 13591 FREE_LOCK(ump); 13592 error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */ 13593 if (error != 0) { 13594 vn_finished_write(mp); 13595 ACQUIRE_LOCK(ump); 13596 return; 13597 } 13598 if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13599 FFSV_FORCEINSMQ)) != 0) { 13600 softdep_error("clear_inodedeps: vget", error); 13601 vfs_unbusy(mp); 13602 vn_finished_write(mp); 13603 ACQUIRE_LOCK(ump); 13604 return; 13605 } 13606 vfs_unbusy(mp); 13607 if (ino == lastino) { 13608 if ((error = ffs_syncvnode(vp, MNT_WAIT, 0))) 13609 softdep_error("clear_inodedeps: fsync1", error); 13610 } else { 13611 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13612 softdep_error("clear_inodedeps: fsync2", error); 13613 BO_LOCK(&vp->v_bufobj); 13614 drain_output(vp); 13615 BO_UNLOCK(&vp->v_bufobj); 13616 } 13617 vput(vp); 13618 vn_finished_write(mp); 13619 ACQUIRE_LOCK(ump); 13620 } 13621} 13622 13623void 13624softdep_buf_append(bp, wkhd) 13625 struct buf *bp; 13626 struct workhead *wkhd; 13627{ 13628 struct worklist *wk; 13629 struct ufsmount *ump; 13630 13631 if ((wk = LIST_FIRST(wkhd)) == NULL) 13632 return; 13633 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 13634 ("softdep_buf_append called on non-softdep filesystem")); 13635 ump = VFSTOUFS(wk->wk_mp); 13636 ACQUIRE_LOCK(ump); 13637 while ((wk = LIST_FIRST(wkhd)) != NULL) { 13638 WORKLIST_REMOVE(wk); 13639 WORKLIST_INSERT(&bp->b_dep, wk); 13640 } 13641 FREE_LOCK(ump); 13642 13643} 13644 13645void 13646softdep_inode_append(ip, cred, wkhd) 13647 struct inode *ip; 13648 struct ucred *cred; 13649 struct workhead *wkhd; 13650{ 13651 struct buf *bp; 13652 struct fs *fs; 13653 int error; 13654 13655 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 13656 ("softdep_inode_append called on non-softdep filesystem")); 13657 fs = ip->i_fs; 13658 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 13659 (int)fs->fs_bsize, cred, &bp); 13660 if (error) { 13661 bqrelse(bp); 13662 softdep_freework(wkhd); 13663 return; 13664 } 13665 softdep_buf_append(bp, wkhd); 13666 bqrelse(bp); 13667} 13668 13669void 13670softdep_freework(wkhd) 13671 struct workhead *wkhd; 13672{ 13673 struct worklist *wk; 13674 struct ufsmount *ump; 13675 13676 if ((wk = LIST_FIRST(wkhd)) == NULL) 13677 return; 13678 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 13679 ("softdep_freework called on non-softdep filesystem")); 13680 ump = VFSTOUFS(wk->wk_mp); 13681 ACQUIRE_LOCK(ump); 13682 handle_jwork(wkhd); 13683 FREE_LOCK(ump); 13684} 13685 13686/* 13687 * 
Function to determine if the buffer has outstanding dependencies 13688 * that will cause a roll-back if the buffer is written. If wantcount 13689 * is set, return number of dependencies, otherwise just yes or no. 13690 */ 13691static int 13692softdep_count_dependencies(bp, wantcount) 13693 struct buf *bp; 13694 int wantcount; 13695{ 13696 struct worklist *wk; 13697 struct ufsmount *ump; 13698 struct bmsafemap *bmsafemap; 13699 struct freework *freework; 13700 struct inodedep *inodedep; 13701 struct indirdep *indirdep; 13702 struct freeblks *freeblks; 13703 struct allocindir *aip; 13704 struct pagedep *pagedep; 13705 struct dirrem *dirrem; 13706 struct newblk *newblk; 13707 struct mkdir *mkdir; 13708 struct diradd *dap; 13709 int i, retval; 13710 13711 retval = 0; 13712 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 13713 return (0); 13714 ump = VFSTOUFS(wk->wk_mp); 13715 ACQUIRE_LOCK(ump); 13716 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 13717 switch (wk->wk_type) { 13718 13719 case D_INODEDEP: 13720 inodedep = WK_INODEDEP(wk); 13721 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 13722 /* bitmap allocation dependency */ 13723 retval += 1; 13724 if (!wantcount) 13725 goto out; 13726 } 13727 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 13728 /* direct block pointer dependency */ 13729 retval += 1; 13730 if (!wantcount) 13731 goto out; 13732 } 13733 if (TAILQ_FIRST(&inodedep->id_extupdt)) { 13734 /* direct block pointer dependency */ 13735 retval += 1; 13736 if (!wantcount) 13737 goto out; 13738 } 13739 if (TAILQ_FIRST(&inodedep->id_inoreflst)) { 13740 /* Add reference dependency. */ 13741 retval += 1; 13742 if (!wantcount) 13743 goto out; 13744 } 13745 continue; 13746 13747 case D_INDIRDEP: 13748 indirdep = WK_INDIRDEP(wk); 13749 13750 TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) { 13751 /* indirect truncation dependency */ 13752 retval += 1; 13753 if (!wantcount) 13754 goto out; 13755 } 13756 13757 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 13758 /* indirect block pointer dependency */ 13759 retval += 1; 13760 if (!wantcount) 13761 goto out; 13762 } 13763 continue; 13764 13765 case D_PAGEDEP: 13766 pagedep = WK_PAGEDEP(wk); 13767 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 13768 if (LIST_FIRST(&dirrem->dm_jremrefhd)) { 13769 /* Journal remove ref dependency. */ 13770 retval += 1; 13771 if (!wantcount) 13772 goto out; 13773 } 13774 } 13775 for (i = 0; i < DAHASHSZ; i++) { 13776 13777 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 13778 /* directory entry dependency */ 13779 retval += 1; 13780 if (!wantcount) 13781 goto out; 13782 } 13783 } 13784 continue; 13785 13786 case D_BMSAFEMAP: 13787 bmsafemap = WK_BMSAFEMAP(wk); 13788 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) { 13789 /* Add reference dependency. */ 13790 retval += 1; 13791 if (!wantcount) 13792 goto out; 13793 } 13794 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) { 13795 /* Allocate block dependency. */ 13796 retval += 1; 13797 if (!wantcount) 13798 goto out; 13799 } 13800 continue; 13801 13802 case D_FREEBLKS: 13803 freeblks = WK_FREEBLKS(wk); 13804 if (LIST_FIRST(&freeblks->fb_jblkdephd)) { 13805 /* Freeblk journal dependency. */ 13806 retval += 1; 13807 if (!wantcount) 13808 goto out; 13809 } 13810 continue; 13811 13812 case D_ALLOCDIRECT: 13813 case D_ALLOCINDIR: 13814 newblk = WK_NEWBLK(wk); 13815 if (newblk->nb_jnewblk) { 13816 /* Journal allocate dependency. 
*/
13817 retval += 1;
13818 if (!wantcount)
13819 goto out;
13820 }
13821 continue;
13822
13823 case D_MKDIR:
13824 mkdir = WK_MKDIR(wk);
13825 if (mkdir->md_jaddref) {
13826 /* Journal reference dependency. */
13827 retval += 1;
13828 if (!wantcount)
13829 goto out;
13830 }
13831 continue;
13832
13833 case D_FREEWORK:
13834 case D_FREEDEP:
13835 case D_JSEGDEP:
13836 case D_JSEG:
13837 case D_SBDEP:
13838 /* never a dependency on these blocks */
13839 continue;
13840
13841 default:
13842 panic("softdep_count_dependencies: Unexpected type %s",
13843 TYPENAME(wk->wk_type));
13844 /* NOTREACHED */
13845 }
13846 }
13847out:
13848 FREE_LOCK(ump);
13849 return (retval);
13850}
13851
13852/*
13853 * Acquire exclusive access to a buffer.
13854 * Must be called with a locked mtx parameter.
13855 * Return acquired buffer or NULL on failure.
13856 */
13857static struct buf *
13858getdirtybuf(bp, lock, waitfor)
13859 struct buf *bp;
13860 struct rwlock *lock;
13861 int waitfor;
13862{
13863 int error;
13864
13865 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13866 if (waitfor != MNT_WAIT)
13867 return (NULL);
13868 error = BUF_LOCK(bp,
13869 LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
13870 /*
13871 * Even if we successfully acquire bp here, we have dropped
13872 * lock, which may violate our guarantee.
13873 */
13874 if (error == 0)
13875 BUF_UNLOCK(bp);
13876 else if (error != ENOLCK)
13877 panic("getdirtybuf: inconsistent lock: %d", error);
13878 rw_wlock(lock);
13879 return (NULL);
13880 }
13881 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13882 if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
13883 rw_wunlock(lock);
13884 BO_LOCK(bp->b_bufobj);
13885 BUF_UNLOCK(bp);
13886 if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13887 bp->b_vflags |= BV_BKGRDWAIT;
13888 msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
13889 PRIBIO | PDROP, "getbuf", 0);
13890 } else
13891 BO_UNLOCK(bp->b_bufobj);
13892 rw_wlock(lock);
13893 return (NULL);
13894 }
13895 BUF_UNLOCK(bp);
13896 if (waitfor != MNT_WAIT)
13897 return (NULL);
13898 /*
13899 * The lock argument must be bp->b_vp's mutex in
13900 * this case.
13901 */
13902#ifdef DEBUG_VFS_LOCKS
13903 if (bp->b_vp->v_type != VCHR)
13904 ASSERT_BO_WLOCKED(bp->b_bufobj);
13905#endif
13906 bp->b_vflags |= BV_BKGRDWAIT;
13907 rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
13908 return (NULL);
13909 }
13910 if ((bp->b_flags & B_DELWRI) == 0) {
13911 BUF_UNLOCK(bp);
13912 return (NULL);
13913 }
13914 bremfree(bp);
13915 return (bp);
13916}
13917
13918
13919/*
13920 * Check if it is safe to suspend the file system now. On entry,
13921 * the vnode interlock for devvp should be held. Return 0 with
13922 * the mount interlock held if the file system can be suspended now,
13923 * otherwise return EAGAIN with the mount interlock held.
13924 */
13925int
13926softdep_check_suspend(struct mount *mp,
13927 struct vnode *devvp,
13928 int softdep_depcnt,
13929 int softdep_accdepcnt,
13930 int secondary_writes,
13931 int secondary_accwrites)
13932{
13933 struct bufobj *bo;
13934 struct ufsmount *ump;
13935 struct inodedep *inodedep;
13936 int error, unlinked;
13937
13938 bo = &devvp->v_bufobj;
13939 ASSERT_BO_WLOCKED(bo);
13940
13941 /*
13942 * If we are not running with soft updates, then we need only
13943 * deal with secondary writes as we try to suspend.
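 * (Secondary writes are those bracketed by
 * vn_start_secondary_write(), e.g. writes issued by the softdep
 * machinery itself; they are counted in mnt_secondary_writes and
 * mnt_secondary_accwrites, separately from the ordinary
 * mnt_writeopcount, so that dependency processing can continue
 * after ordinary writes have been suspended.)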
13944 */ 13945 if (MOUNTEDSOFTDEP(mp) == 0) { 13946 MNT_ILOCK(mp); 13947 while (mp->mnt_secondary_writes != 0) { 13948 BO_UNLOCK(bo); 13949 msleep(&mp->mnt_secondary_writes, MNT_MTX(mp), 13950 (PUSER - 1) | PDROP, "secwr", 0); 13951 BO_LOCK(bo); 13952 MNT_ILOCK(mp); 13953 } 13954 13955 /* 13956 * Reasons for needing more work before suspend: 13957 * - Dirty buffers on devvp. 13958 * - Secondary writes occurred after start of vnode sync loop 13959 */ 13960 error = 0; 13961 if (bo->bo_numoutput > 0 || 13962 bo->bo_dirty.bv_cnt > 0 || 13963 secondary_writes != 0 || 13964 mp->mnt_secondary_writes != 0 || 13965 secondary_accwrites != mp->mnt_secondary_accwrites) 13966 error = EAGAIN; 13967 BO_UNLOCK(bo); 13968 return (error); 13969 } 13970 13971 /* 13972 * If we are running with soft updates, then we need to coordinate 13973 * with them as we try to suspend. 13974 */ 13975 ump = VFSTOUFS(mp); 13976 for (;;) { 13977 if (!TRY_ACQUIRE_LOCK(ump)) { 13978 BO_UNLOCK(bo); 13979 ACQUIRE_LOCK(ump); 13980 FREE_LOCK(ump); 13981 BO_LOCK(bo); 13982 continue; 13983 } 13984 MNT_ILOCK(mp); 13985 if (mp->mnt_secondary_writes != 0) { 13986 FREE_LOCK(ump); 13987 BO_UNLOCK(bo); 13988 msleep(&mp->mnt_secondary_writes, 13989 MNT_MTX(mp), 13990 (PUSER - 1) | PDROP, "secwr", 0); 13991 BO_LOCK(bo); 13992 continue; 13993 } 13994 break; 13995 } 13996 13997 unlinked = 0; 13998 if (MOUNTEDSUJ(mp)) { 13999 for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked); 14000 inodedep != NULL; 14001 inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { 14002 if ((inodedep->id_state & (UNLINKED | UNLINKLINKS | 14003 UNLINKONLIST)) != (UNLINKED | UNLINKLINKS | 14004 UNLINKONLIST) || 14005 !check_inodedep_free(inodedep)) 14006 continue; 14007 unlinked++; 14008 } 14009 } 14010 14011 /* 14012 * Reasons for needing more work before suspend: 14013 * - Dirty buffers on devvp. 14014 * - Softdep activity occurred after start of vnode sync loop 14015 * - Secondary writes occurred after start of vnode sync loop 14016 */ 14017 error = 0; 14018 if (bo->bo_numoutput > 0 || 14019 bo->bo_dirty.bv_cnt > 0 || 14020 softdep_depcnt != unlinked || 14021 ump->softdep_deps != unlinked || 14022 softdep_accdepcnt != ump->softdep_accdeps || 14023 secondary_writes != 0 || 14024 mp->mnt_secondary_writes != 0 || 14025 secondary_accwrites != mp->mnt_secondary_accwrites) 14026 error = EAGAIN; 14027 FREE_LOCK(ump); 14028 BO_UNLOCK(bo); 14029 return (error); 14030} 14031 14032 14033/* 14034 * Get the number of dependency structures for the file system, both 14035 * the current number and the total number allocated. These will 14036 * later be used to detect that softdep processing has occurred. 14037 */ 14038void 14039softdep_get_depcounts(struct mount *mp, 14040 int *softdep_depsp, 14041 int *softdep_accdepsp) 14042{ 14043 struct ufsmount *ump; 14044 14045 if (MOUNTEDSOFTDEP(mp) == 0) { 14046 *softdep_depsp = 0; 14047 *softdep_accdepsp = 0; 14048 return; 14049 } 14050 ump = VFSTOUFS(mp); 14051 ACQUIRE_LOCK(ump); 14052 *softdep_depsp = ump->softdep_deps; 14053 *softdep_accdepsp = ump->softdep_accdeps; 14054 FREE_LOCK(ump); 14055} 14056 14057/* 14058 * Wait for pending output on a vnode to complete. 14059 * Must be called with vnode lock and interlock locked. 14060 * 14061 * XXX: Should just be a call to bufobj_wwait(). 
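 * That is, the body below could be replaced by
 *
 *	(void) bufobj_wwait(&vp->v_bufobj, 0, 0);
 *
 * which performs the same BO_WWAIT/bo_numoutput handshake.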
14062 */ 14063static void 14064drain_output(vp) 14065 struct vnode *vp; 14066{ 14067 struct bufobj *bo; 14068 14069 bo = &vp->v_bufobj; 14070 ASSERT_VOP_LOCKED(vp, "drain_output"); 14071 ASSERT_BO_WLOCKED(bo); 14072 14073 while (bo->bo_numoutput) { 14074 bo->bo_flag |= BO_WWAIT; 14075 msleep((caddr_t)&bo->bo_numoutput, 14076 BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0); 14077 } 14078} 14079 14080/* 14081 * Called whenever a buffer that is being invalidated or reallocated 14082 * contains dependencies. This should only happen if an I/O error has 14083 * occurred. The routine is called with the buffer locked. 14084 */ 14085static void 14086softdep_deallocate_dependencies(bp) 14087 struct buf *bp; 14088{ 14089 14090 if ((bp->b_ioflags & BIO_ERROR) == 0) 14091 panic("softdep_deallocate_dependencies: dangling deps"); 14092 if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL) 14093 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 14094 else 14095 printf("softdep_deallocate_dependencies: " 14096 "got error %d while accessing filesystem\n", bp->b_error); 14097 if (bp->b_error != ENXIO) 14098 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 14099} 14100 14101/* 14102 * Function to handle asynchronous write errors in the filesystem. 14103 */ 14104static void 14105softdep_error(func, error) 14106 char *func; 14107 int error; 14108{ 14109 14110 /* XXX should do something better! */ 14111 printf("%s: got error %d while accessing filesystem\n", func, error); 14112} 14113 14114#ifdef DDB 14115 14116static void 14117inodedep_print(struct inodedep *inodedep, int verbose) 14118{ 14119 db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d" 14120 " saveino %p\n", 14121 inodedep, inodedep->id_fs, inodedep->id_state, 14122 (intmax_t)inodedep->id_ino, 14123 (intmax_t)fsbtodb(inodedep->id_fs, 14124 ino_to_fsba(inodedep->id_fs, inodedep->id_ino)), 14125 inodedep->id_nlinkdelta, inodedep->id_savednlink, 14126 inodedep->id_savedino1); 14127 14128 if (verbose == 0) 14129 return; 14130 14131 db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, " 14132 "mkdiradd %p\n", 14133 LIST_FIRST(&inodedep->id_pendinghd), 14134 LIST_FIRST(&inodedep->id_bufwait), 14135 LIST_FIRST(&inodedep->id_inowait), 14136 TAILQ_FIRST(&inodedep->id_inoreflst), 14137 inodedep->id_mkdiradd); 14138 db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n", 14139 TAILQ_FIRST(&inodedep->id_inoupdt), 14140 TAILQ_FIRST(&inodedep->id_newinoupdt), 14141 TAILQ_FIRST(&inodedep->id_extupdt), 14142 TAILQ_FIRST(&inodedep->id_newextupdt)); 14143} 14144 14145DB_SHOW_COMMAND(inodedep, db_show_inodedep) 14146{ 14147 14148 if (have_addr == 0) { 14149 db_printf("Address required\n"); 14150 return; 14151 } 14152 inodedep_print((struct inodedep*)addr, 1); 14153} 14154 14155DB_SHOW_COMMAND(inodedeps, db_show_inodedeps) 14156{ 14157 struct inodedep_hashhead *inodedephd; 14158 struct inodedep *inodedep; 14159 struct ufsmount *ump; 14160 int cnt; 14161 14162 if (have_addr == 0) { 14163 db_printf("Address required\n"); 14164 return; 14165 } 14166 ump = (struct ufsmount *)addr; 14167 for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) { 14168 inodedephd = &ump->inodedep_hashtbl[cnt]; 14169 LIST_FOREACH(inodedep, inodedephd, id_hash) { 14170 inodedep_print(inodedep, 0); 14171 } 14172 } 14173} 14174 14175DB_SHOW_COMMAND(worklist, db_show_worklist) 14176{ 14177 struct worklist *wk; 14178 14179 if (have_addr == 0) { 14180 db_printf("Address required\n"); 14181 return; 14182 } 14183 wk = (struct worklist *)addr; 
14184 db_printf("worklist: %p type %s state 0x%X\n",
14185 wk, TYPENAME(wk->wk_type), wk->wk_state);
14186}
14187
14188DB_SHOW_COMMAND(workhead, db_show_workhead)
14189{
14190 struct workhead *wkhd;
14191 struct worklist *wk;
14192 int i;
14193
14194 if (have_addr == 0) {
14195 db_printf("Address required\n");
14196 return;
14197 }
14198 wkhd = (struct workhead *)addr;
14199 wk = LIST_FIRST(wkhd);
14200 for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
14201 db_printf("worklist: %p type %s state 0x%X\n",
14202 wk, TYPENAME(wk->wk_type), wk->wk_state);
14203 if (i == 100)
14204 db_printf("workhead overflow");
14205 db_printf("\n");
14206}
14207
14208
14209DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
14210{
14211 struct mkdirlist *mkdirlisthd;
14212 struct jaddref *jaddref;
14213 struct diradd *diradd;
14214 struct mkdir *mkdir;
14215
14216 if (have_addr == 0) {
14217 db_printf("Address required\n");
14218 return;
14219 }
14220 mkdirlisthd = (struct mkdirlist *)addr;
14221 LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
14222 diradd = mkdir->md_diradd;
14223 db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
14224 mkdir, mkdir->md_state, diradd, diradd->da_state);
14225 if ((jaddref = mkdir->md_jaddref) != NULL)
14226 db_printf(" jaddref %p jaddref state 0x%X",
14227 jaddref, jaddref->ja_state);
14228 db_printf("\n");
14229 }
14230}
14231
14232/* exported to ffs_vfsops.c */
14233extern void db_print_ffs(struct ufsmount *ump);
14234void
14235db_print_ffs(struct ufsmount *ump)
14236{
14237 db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
14238 ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
14239 ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
14240 ump->softdep_deps, ump->softdep_req);
14241}
14242
14243#endif /* DDB */
14244
14245#endif /* SOFTUPDATES */
14246