ffs_softdep.c revision 270007
/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_softdep.c 270007 2014-08-14 23:38:04Z mckusick $");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

/*
 * For now we want the safety net that the DEBUG flag provides.
 */
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0 /* Define to KTR_SPARE. */

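/*
 * When the kernel is built without the SOFTUPDATES option, the stubs
 * below replace the real implementations.  Entry points that should
 * never be reached on a non-softdep filesystem panic, while the few
 * that may be called harmlessly just return.
 */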
#ifndef SOFTUPDATES

int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

	panic("softdep_unmount called");
}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{

	panic("softdep_setup_sbupdate called");
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}
panic("softdep_change_directoryentry_offset called"); 294} 295 296void 297softdep_setup_remove(bp, dp, ip, isrmdir) 298 struct buf *bp; 299 struct inode *dp; 300 struct inode *ip; 301 int isrmdir; 302{ 303 304 panic("softdep_setup_remove called"); 305} 306 307void 308softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 309 struct buf *bp; 310 struct inode *dp; 311 struct inode *ip; 312 ino_t newinum; 313 int isrmdir; 314{ 315 316 panic("softdep_setup_directory_change called"); 317} 318 319void 320softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 321 struct mount *mp; 322 struct buf *bp; 323 ufs2_daddr_t blkno; 324 int frags; 325 struct workhead *wkhd; 326{ 327 328 panic("%s called", __FUNCTION__); 329} 330 331void 332softdep_setup_inofree(mp, bp, ino, wkhd) 333 struct mount *mp; 334 struct buf *bp; 335 ino_t ino; 336 struct workhead *wkhd; 337{ 338 339 panic("%s called", __FUNCTION__); 340} 341 342void 343softdep_setup_unlink(dp, ip) 344 struct inode *dp; 345 struct inode *ip; 346{ 347 348 panic("%s called", __FUNCTION__); 349} 350 351void 352softdep_setup_link(dp, ip) 353 struct inode *dp; 354 struct inode *ip; 355{ 356 357 panic("%s called", __FUNCTION__); 358} 359 360void 361softdep_revert_link(dp, ip) 362 struct inode *dp; 363 struct inode *ip; 364{ 365 366 panic("%s called", __FUNCTION__); 367} 368 369void 370softdep_setup_rmdir(dp, ip) 371 struct inode *dp; 372 struct inode *ip; 373{ 374 375 panic("%s called", __FUNCTION__); 376} 377 378void 379softdep_revert_rmdir(dp, ip) 380 struct inode *dp; 381 struct inode *ip; 382{ 383 384 panic("%s called", __FUNCTION__); 385} 386 387void 388softdep_setup_create(dp, ip) 389 struct inode *dp; 390 struct inode *ip; 391{ 392 393 panic("%s called", __FUNCTION__); 394} 395 396void 397softdep_revert_create(dp, ip) 398 struct inode *dp; 399 struct inode *ip; 400{ 401 402 panic("%s called", __FUNCTION__); 403} 404 405void 406softdep_setup_mkdir(dp, ip) 407 struct inode *dp; 408 struct inode *ip; 409{ 410 411 panic("%s called", __FUNCTION__); 412} 413 414void 415softdep_revert_mkdir(dp, ip) 416 struct inode *dp; 417 struct inode *ip; 418{ 419 420 panic("%s called", __FUNCTION__); 421} 422 423void 424softdep_setup_dotdot_link(dp, ip) 425 struct inode *dp; 426 struct inode *ip; 427{ 428 429 panic("%s called", __FUNCTION__); 430} 431 432int 433softdep_prealloc(vp, waitok) 434 struct vnode *vp; 435 int waitok; 436{ 437 438 panic("%s called", __FUNCTION__); 439} 440 441int 442softdep_journal_lookup(mp, vpp) 443 struct mount *mp; 444 struct vnode **vpp; 445{ 446 447 return (ENOENT); 448} 449 450void 451softdep_change_linkcnt(ip) 452 struct inode *ip; 453{ 454 455 panic("softdep_change_linkcnt called"); 456} 457 458void 459softdep_load_inodeblock(ip) 460 struct inode *ip; 461{ 462 463 panic("softdep_load_inodeblock called"); 464} 465 466void 467softdep_update_inodeblock(ip, bp, waitfor) 468 struct inode *ip; 469 struct buf *bp; 470 int waitfor; 471{ 472 473 panic("softdep_update_inodeblock called"); 474} 475 476int 477softdep_fsync(vp) 478 struct vnode *vp; /* the "in_core" copy of the inode */ 479{ 480 481 return (0); 482} 483 484void 485softdep_fsync_mountdev(vp) 486 struct vnode *vp; 487{ 488 489 return; 490} 491 492int 493softdep_flushworklist(oldmnt, countp, td) 494 struct mount *oldmnt; 495 int *countp; 496 struct thread *td; 497{ 498 499 *countp = 0; 500 return (0); 501} 502 503int 504softdep_sync_metadata(struct vnode *vp) 505{ 506 507 panic("softdep_sync_metadata called"); 508} 509 510int 511softdep_sync_buf(struct vnode *vp, struct buf *bp, 

int
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{

	panic("softdep_sync_buf called");
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_depcnt,
		      int softdep_accdepcnt,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_depcnt,
	(void) softdep_accdepcnt;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_appendwork called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_appendwork called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");

static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
    "high use dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
    "current dependencies written");

unsigned long dep_current[D_LAST + 1];
unsigned long dep_highuse[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];

#define	SOFTDEP_TYPE(type, str, long)					\
    static MALLOC_DEFINE(M_ ## type, #str, long);			\
    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_total[D_ ## type], 0, "");					\
    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_current[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_highuse[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_write[D_ ## type], 0, "");

SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
    "Block or frag allocated from cyl group map");
dependency"); 651SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode"); 652SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies"); 653SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block"); 654SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode"); 655SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode"); 656SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated"); 657SOFTDEP_TYPE(DIRADD, diradd, "New directory entry"); 658SOFTDEP_TYPE(MKDIR, mkdir, "New directory"); 659SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted"); 660SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block"); 661SOFTDEP_TYPE(FREEWORK, freework, "free an inode block"); 662SOFTDEP_TYPE(FREEDEP, freedep, "track a block free"); 663SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add"); 664SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove"); 665SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move"); 666SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block"); 667SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block"); 668SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag"); 669SOFTDEP_TYPE(JSEG, jseg, "Journal segment"); 670SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete"); 671SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency"); 672SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation"); 673SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete"); 674 675static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel"); 676 677static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes"); 678static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations"); 679static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data"); 680 681#define M_SOFTDEP_FLAGS (M_WAITOK) 682 683/* 684 * translate from workitem type to memory type 685 * MUST match the defines above, such that memtype[D_XXX] == M_XXX 686 */ 687static struct malloc_type *memtype[] = { 688 M_PAGEDEP, 689 M_INODEDEP, 690 M_BMSAFEMAP, 691 M_NEWBLK, 692 M_ALLOCDIRECT, 693 M_INDIRDEP, 694 M_ALLOCINDIR, 695 M_FREEFRAG, 696 M_FREEBLKS, 697 M_FREEFILE, 698 M_DIRADD, 699 M_MKDIR, 700 M_DIRREM, 701 M_NEWDIRBLK, 702 M_FREEWORK, 703 M_FREEDEP, 704 M_JADDREF, 705 M_JREMREF, 706 M_JMVREF, 707 M_JNEWBLK, 708 M_JFREEBLK, 709 M_JFREEFRAG, 710 M_JSEG, 711 M_JSEGDEP, 712 M_SBDEP, 713 M_JTRUNC, 714 M_JFSYNC, 715 M_SENTINEL 716}; 717 718#define DtoM(type) (memtype[type]) 719 720/* 721 * Names of malloc types. 722 */ 723#define TYPENAME(type) \ 724 ((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???") 725/* 726 * End system adaptation definitions. 727 */ 728 729#define DOTDOT_OFFSET offsetof(struct dirtemplate, dotdot_ino) 730#define DOT_OFFSET offsetof(struct dirtemplate, dot_ino) 731 732/* 733 * Internal function prototypes. 
#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)

/*
 * Internal function prototypes.
 */
static	void check_clear_deps(struct mount *);
static	void softdep_error(char *, int);
static	int softdep_process_worklist(struct mount *, int);
static	int softdep_waitidle(struct mount *);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
static	void clear_remove(struct mount *);
static	void clear_inodedeps(struct mount *);
static	void unlinked_inodedep(struct mount *, struct inodedep *);
static	void clear_unlinked_inodedep(struct inodedep *);
static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int free_pagedep(struct pagedep *);
static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int sync_cgs(struct mount *, int);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	int handle_written_sbdep(struct sbdep *, struct buf *);
static	void initiate_write_sbdep(struct sbdep *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_indirdep(struct indirdep *, struct buf *,
	    struct buf**);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
static	void handle_written_jaddref(struct jaddref *);
static	void handle_written_jremref(struct jremref *);
static	void handle_written_jseg(struct jseg *, struct buf *);
static	void handle_written_jnewblk(struct jnewblk *);
static	void handle_written_jblkdep(struct jblkdep *);
static	void handle_written_jfreefrag(struct jfreefrag *);
static	void complete_jseg(struct jseg *);
static	void complete_jsegs(struct jseg *);
static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static	inline void inoref_write(struct inoref *, struct jseg *,
	    struct jrefrec *);
static	void handle_allocdirect_partdone(struct allocdirect *,
	    struct workhead *);
static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
	    struct workhead *);
static	void indirdep_complete(struct indirdep *);
static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
static	void indirblk_insert(struct freework *);
static	void indirblk_remove(struct freework *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void initiate_write_indirdep(struct indirdep*, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	    struct buf *);
static	void cancel_indirdep(struct indirdep *, struct buf *,
	    struct freeblks *);
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
	    struct jremref *);
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
	    int);
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
	    struct inodedep *);
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
	    struct workhead *);
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
	    int, int);
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
	    ufs_lbn_t, int);
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	    int, int);
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk*);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	    struct workhead *);
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	    ufs_lbn_t);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
	    struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(void);
static	void worklist_speedup(struct mount *);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct ufsmount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	    uint16_t);
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	    uint16_t);
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	    ufs2_daddr_t, int);
static	void adjust_newfreework(struct freeblks *, int);
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	    struct mkdir **);
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

/*
 * Global lock over all of soft updates.
 */
static struct rwlock lk;
RW_SYSINIT(softdep_lock, &lk, "Softdep Lock");

/*
 * Allow per-filesystem soft-updates locking.
 * For now all use the same global lock defined above.
 */
#define LOCK_PTR(ump)		((ump)->um_softdep->sd_fslock)
#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock((ump)->um_softdep->sd_fslock)
#define ACQUIRE_LOCK(ump)	rw_wlock((ump)->um_softdep->sd_fslock)
#define FREE_LOCK(ump)		rw_wunlock((ump)->um_softdep->sd_fslock)
#define LOCK_OWNED(ump)		rw_assert((ump)->um_softdep->sd_fslock, \
				    RA_WLOCKED)

#define	BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
#define	BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)

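/*
 * A typical dependency-manipulation sequence brackets its work with
 * the macros above, for example:
 *
 *	ump = VFSTOUFS(mp);
 *	ACQUIRE_LOCK(ump);
 *	... look up and modify dependency structures ...
 *	FREE_LOCK(ump);
 *
 * Internal routines use LOCK_OWNED() to assert that their caller
 * already holds the per-filesystem lock.
 */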
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKLIST_INSERT_UNLOCKED WORKLIST_INSERT
#define WORKLIST_REMOVE_UNLOCKED WORKLIST_REMOVE

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *, int);
static	void worklist_remove(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)

static void
worklist_insert(head, item, locked)
	struct workhead *head;
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item, locked)
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}
#endif /* DEBUG */

/*
 * Merge two jsegdeps keeping only the oldest one as newer references
 * can't be discarded until after older references.
 */
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
{
	struct jsegdep *swp;

	if (two == NULL)
		return (one);

	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
		swp = one;
		one = two;
		two = swp;
	}
	WORKLIST_REMOVE(&two->jd_list);
	free_jsegdep(two);

	return (one);
}

/*
 * If two freedeps are compatible free one to reduce list size.
 */
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
{
	if (two == NULL)
		return (one);

	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
		free_freedep(two);
	}
	return (one);
}

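/*
 * The merge helpers above are used by jwork_move() below to coalesce
 * duplicate freedeps and jsegdeps as one journal work list is appended
 * to another, keeping the lists short.
 */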
/*
 * Move journal work from one list to another.  Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
 */
static void
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
{
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;

	KASSERT(dst != src,
	    ("jwork_move: dst == src"));
	freedep = NULL;
	jsegdep = NULL;
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}

	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
			continue;
		}
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}
}

static void
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
{
	struct jsegdep *jsegdepn;
	struct worklist *wk;

	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
			break;
	if (wk == NULL) {
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		return;
	}
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	} else
		free_jsegdep(jsegdep);
}

/*
 * Routines for tracking and managing workitems.
 */
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);
static	void workitem_reassign(struct worklist *, int);

#define	WORKITEM_FREE(item, type) \
	workitem_free((struct worklist *)(item), (type))
#define	WORKITEM_REASSIGN(item, type) \
	workitem_reassign((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;

#ifdef DEBUG
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list",
		    TYPENAME(item->wk_type), item->wk_state);
	if (item->wk_type != type && type != D_NEWBLK)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
#endif
	if (item->wk_state & IOWAITING)
		wakeup(item);
	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_deps > 0,
	    ("workitem_free: %s: softdep_deps going negative",
	    ump->um_fs->fs_fsmnt));
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_free: %s: dep_current[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	dep_current[item->wk_type]--;
	ump->softdep_curdeps[item->wk_type] -= 1;
	free(item, DtoM(type));
}

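/*
 * workitem_alloc() below initializes a new work item and charges it
 * against both the global dep_current[] statistics and the per-mount
 * counters under the per-filesystem lock; workitem_free() above
 * reverses the accounting and wakes any thread sleeping on the item
 * or waiting for the dependency count to drain.
 */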
static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	struct ufsmount *ump;

	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;

	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	dep_current[type]++;
	if (dep_current[type] > dep_highuse[type])
		dep_highuse[type] = dep_current[type];
	dep_total[type]++;
	ump->softdep_curdeps[type] += 1;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
	FREE_LOCK(ump);
}

static void
workitem_reassign(item, newtype)
	struct worklist *item;
	int newtype;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ump->softdep_curdeps[item->wk_type] -= 1;
	ump->softdep_curdeps[newtype] += 1;
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_reassign: %s: dep_current[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	dep_current[item->wk_type]--;
	dep_current[newtype]++;
	if (dep_current[newtype] > dep_highuse[newtype])
		dep_highuse[newtype] = dep_current[newtype];
	dep_total[newtype]++;
	item->wk_type = newtype;
}

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int maxindirdeps = 50;	/* max number of indirdeps before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static struct mount *req_pending;
#define ALLCLEAN ((struct mount *)-1)
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_softdep_mounts;	/* number of softdep mounted filesystems */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */
static int stat_emptyjblocks; /* Number of potentially empty journal blocks */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, maxindirdeps, CTLFLAG_RW,
    &maxindirdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, softdep_mounts, CTLFLAG_RD,
    &stat_softdep_mounts, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
    &stat_emptyjblocks, 0, "");
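
/*
 * The tunables and statistics above are exported under the
 * debug.softdep sysctl tree, e.g.:
 *
 *	sysctl debug.softdep.max_softdeps
 *	sysctl debug.softdep.tickdelay=2
 */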

SYSCTL_DECL(_vfs_ffs);

/* Whether to recompute the summary at mount time */
static int compute_summary_at_mount = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");

static struct proc *softdepproc;
static struct kproc_desc softdep_kp = {
	"softdepflush",
	softdep_flush,
	&softdepproc
};
SYSINIT(sdproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
    &softdep_kp);

static void
softdep_flush(void)
{
	struct mount *nmp;
	struct mount *mp;
	struct ufsmount *ump;
	struct thread *td;
	int remaining;
	int progress;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		kproc_suspend_check(softdepproc);
		remaining = progress = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			if (MOUNTEDSOFTDEP(mp) == 0)
				continue;
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
				continue;
			ump = VFSTOUFS(mp);
			progress += softdep_process_worklist(mp, 0);
			remaining += ump->softdep_on_worklist;
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (remaining && progress)
			continue;
		rw_wlock(&lk);
		if (req_pending == NULL)
			msleep(&req_pending, &lk, PVM, "sdflush", hz);
		req_pending = NULL;
		rw_wunlock(&lk);
	}
}

static void
worklist_speedup(mp)
	struct mount *mp;
{
	rw_assert(&lk, RA_WLOCKED);
	if (req_pending == 0) {
		req_pending = mp;
		wakeup(&req_pending);
	}
}

static int
softdep_speedup(void)
{

	worklist_speedup(ALLCLEAN);
	bd_speedup();
	return (speedup_syncer());
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */

#define	WK_HEAD		0x0001	/* Add to HEAD. */
#define	WK_NODELAY	0x0002	/* Process immediately. */

static void
add_to_worklist(wk, flags)
	struct worklist *wk;
	int flags;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	} else {
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	}
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
		worklist_speedup(wk->wk_mp);
}
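
/*
 * The pending queue is a LIST, so add_to_worklist() maintains
 * softdep_worklist_tail by hand to provide FIFO ordering; WK_HEAD is
 * the exception used to put retried items back at the front.
 */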

/*
 * Remove the item to be processed. If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
 */
static void
remove_from_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	WORKLIST_REMOVE(wk);
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	ump->softdep_on_worklist -= 1;
}

static void
wake_worklist(wk)
	struct worklist *wk;
{
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
		wakeup(wk);
	}
}

static void
wait_worklist(wk, wmesg)
	struct worklist *wk;
	char *wmesg;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	wk->wk_state |= IOWAITING;
	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that everything is done in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	int cnt, matchcnt;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	if (MOUNTEDSOFTDEP(mp) == 0)
		return (0);
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	check_clear_deps(mp);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
			break;
		else
			matchcnt += cnt;
		check_clear_deps(mp);
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (should_yield()) {
			FREE_LOCK(ump);
			kern_yield(PRI_USER);
			bwillwrite();
			ACQUIRE_LOCK(ump);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. This gives the syncer thread the opportunity
		 * to pause if appropriate.
		 */
		if (!full && starttime != time_second)
			break;
	}
	if (full == 0)
		journal_unsuspend(ump);
	FREE_LOCK(ump);
	return (matchcnt);
}

/*
 * Process all removes associated with a vnode if we are running out of
 * journal space.  Any other process which attempts to flush these will
 * be unable to do so, as we have the vnodes locked.
 */
static void
process_removes(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
top:
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			/*
			 * If another thread is trying to lock this vnode
			 * it will fail but we must wait for it to do so
			 * before we can proceed.
			 */
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
				goto top;
			}
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
				break;
		}
		if (dirrem == NULL)
			return;
		remove_from_worklist(&dirrem->dm_list);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
	}
}

/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space.  This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(ump);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(ump);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(ump);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(ump);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(ump);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(ump);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
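	/*
	 * A stack-allocated sentinel work item is threaded through the
	 * pending list below so that the traversal can resume at the
	 * right place after the per-filesystem lock is dropped and
	 * re-taken around the handling of each item.
	 */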
	PHOLD(curproc);	/* Don't let the stack go away. */
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later.  Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	LIST_REMOVE(&sentinel, wk_list);
	/* Sentinel could've become the tail from remove_from_worklist. */
	if (ump->softdep_worklist_tail == &sentinel)
		ump->softdep_worklist_tail =
		    (struct worklist *)sentinel.wk_list.le_prev;
	PRELE(curproc);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
int
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;
	struct ufsmount *ump;
	int dirty;

	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
		return (0);
	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
	    ("softdep_move_dependencies called on non-softdep filesystem"));
	dirty = 0;
	wktail = NULL;
	ump = VFSTOUFS(wk->wk_mp);
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wk->wk_type == D_BMSAFEMAP &&
		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
			dirty = 1;
		if (wktail == 0)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(ump);

	return (dirty);
}
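
/*
 * softdep_move_dependencies() above transfers the dependency list of
 * one buffer to another so that the work follows the data, e.g. when
 * a copy of a buffer is written in its place; its return value tells
 * the caller whether a cylinder-group (bmsafemap) dependency dirtied
 * the new buffer.
 */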

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	int count, error = 0;
	struct ufsmount *ump;

	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
	 */
	*countp = 0;
	ump = VFSTOUFS(oldmnt);
	devvp = ump->um_devvp;
	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
		*countp += count;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0);
		if (error)
			break;
	}
	return (error);
}

static int
softdep_waitidle(struct mount *mp)
{
	struct ufsmount *ump;
	int error;
	int i;

	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	for (i = 0; i < 10 && ump->softdep_deps; i++) {
		ump->softdep_req = 1;
		if (ump->softdep_on_worklist)
			panic("softdep_waitidle: work added after flush.");
		msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM, "softdeps", 1);
	}
	ump->softdep_req = 0;
	FREE_LOCK(ump);
	error = 0;
	if (i == 10) {
		error = EBUSY;
		printf("softdep_waitidle: Failed to flush worklist for %p\n",
		    mp);
	}

	return (error);
}

/*
 * Flush all vnodes and worklist items associated with a specified mount point.
 */
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{
#ifdef QUOTA
	struct ufsmount *ump;
	int i;
#endif
	int error, early, depcount, loopcnt, retry_flush_count, retry;
	int morework;

	KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0,
	    ("softdep_flushfiles called on non-softdep filesystem"));
	loopcnt = 10;
	retry_flush_count = 3;
retry_flush:
	error = 0;

	/*
	 * Alternately flush the vnodes associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	for (; loopcnt > 0; loopcnt--) {
		/*
		 * Do another flush in case any vnodes were brought in
		 * as part of the cleanup operations.
		 */
		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
			break;
		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
		    depcount == 0)
			break;
	}
1919 */ 1920 if (loopcnt == 0) { 1921 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) 1922 panic("softdep_flushfiles: looping"); 1923 error = EBUSY; 1924 } 1925 if (!error) 1926 error = softdep_waitidle(oldmnt); 1927 if (!error) { 1928 if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) { 1929 retry = 0; 1930 MNT_ILOCK(oldmnt); 1931 KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0, 1932 ("softdep_flushfiles: !MNTK_NOINSMNTQ")); 1933 morework = oldmnt->mnt_nvnodelistsize > 0; 1934#ifdef QUOTA 1935 ump = VFSTOUFS(oldmnt); 1936 UFS_LOCK(ump); 1937 for (i = 0; i < MAXQUOTAS; i++) { 1938 if (ump->um_quotas[i] != NULLVP) 1939 morework = 1; 1940 } 1941 UFS_UNLOCK(ump); 1942#endif 1943 if (morework) { 1944 if (--retry_flush_count > 0) { 1945 retry = 1; 1946 loopcnt = 3; 1947 } else 1948 error = EBUSY; 1949 } 1950 MNT_IUNLOCK(oldmnt); 1951 if (retry) 1952 goto retry_flush; 1953 } 1954 } 1955 return (error); 1956} 1957 1958/* 1959 * Structure hashing. 1960 * 1961 * There are four types of structures that can be looked up: 1962 * 1) pagedep structures identified by mount point, inode number, 1963 * and logical block. 1964 * 2) inodedep structures identified by mount point and inode number. 1965 * 3) newblk structures identified by mount point and 1966 * physical block number. 1967 * 4) bmsafemap structures identified by mount point and 1968 * cylinder group number. 1969 * 1970 * The "pagedep" and "inodedep" dependency structures are hashed 1971 * separately from the file blocks and inodes to which they correspond. 1972 * This separation helps when the in-memory copy of an inode or 1973 * file block must be replaced. It also obviates the need to access 1974 * an inode or file page when simply updating (or de-allocating) 1975 * dependency structures. Lookup of newblk structures is needed to 1976 * find newly allocated blocks when trying to associate them with 1977 * their allocdirect or allocindir structure. 1978 * 1979 * The lookup routines optionally create and hash a new instance when 1980 * an existing entry is not found. The bmsafemap lookup routine always 1981 * allocates a new structure if an existing one is not found. 1982 */ 1983#define DEPALLOC 0x0001 /* allocate structure if lookup fails */ 1984#define NODELAY 0x0002 /* cannot do background work */ 1985 1986/* 1987 * Structures and routines associated with pagedep caching. 1988 */ 1989#define PAGEDEP_HASH(ump, inum, lbn) \ 1990 (&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size]) 1991 1992static int 1993pagedep_find(pagedephd, ino, lbn, pagedeppp) 1994 struct pagedep_hashhead *pagedephd; 1995 ino_t ino; 1996 ufs_lbn_t lbn; 1997 struct pagedep **pagedeppp; 1998{ 1999 struct pagedep *pagedep; 2000 2001 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 2002 if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) { 2003 *pagedeppp = pagedep; 2004 return (1); 2005 } 2006 } 2007 *pagedeppp = NULL; 2008 return (0); 2009} 2010/* 2011 * Look up a pagedep. Return 1 if found, 0 otherwise. 2012 * If not found, allocate if DEPALLOC flag is passed. 2013 * Found or allocated entry is returned in pagedeppp. 2014 * This routine must be called with splbio interrupts blocked. 
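 *
 * In this revision the historic "splbio" wording corresponds to holding
 * the per-mount softdep lock: the routine asserts LOCK_OWNED(ump) on
 * entry. A hedged usage sketch (caller context is illustrative):
 *
 *	ACQUIRE_LOCK(ump);
 *	if (pagedep_lookup(mp, bp, ip->i_number, lbn, DEPALLOC, &pagedep))
 *		... an existing pagedep was found ...
 *	else
 *		... a fresh pagedep was allocated and hashed ...
 *	FREE_LOCK(ump);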
2015 */ 2016static int 2017pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp) 2018 struct mount *mp; 2019 struct buf *bp; 2020 ino_t ino; 2021 ufs_lbn_t lbn; 2022 int flags; 2023 struct pagedep **pagedeppp; 2024{ 2025 struct pagedep *pagedep; 2026 struct pagedep_hashhead *pagedephd; 2027 struct worklist *wk; 2028 struct ufsmount *ump; 2029 int ret; 2030 int i; 2031 2032 ump = VFSTOUFS(mp); 2033 LOCK_OWNED(ump); 2034 if (bp) { 2035 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 2036 if (wk->wk_type == D_PAGEDEP) { 2037 *pagedeppp = WK_PAGEDEP(wk); 2038 return (1); 2039 } 2040 } 2041 } 2042 pagedephd = PAGEDEP_HASH(ump, ino, lbn); 2043 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2044 if (ret) { 2045 if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp) 2046 WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list); 2047 return (1); 2048 } 2049 if ((flags & DEPALLOC) == 0) 2050 return (0); 2051 FREE_LOCK(ump); 2052 pagedep = malloc(sizeof(struct pagedep), 2053 M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO); 2054 workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp); 2055 ACQUIRE_LOCK(ump); 2056 ret = pagedep_find(pagedephd, ino, lbn, pagedeppp); 2057 if (*pagedeppp) { 2058 /* 2059 * This should never happen since we only create pagedeps 2060 * with the vnode lock held. Could be an assert. 2061 */ 2062 WORKITEM_FREE(pagedep, D_PAGEDEP); 2063 return (ret); 2064 } 2065 pagedep->pd_ino = ino; 2066 pagedep->pd_lbn = lbn; 2067 LIST_INIT(&pagedep->pd_dirremhd); 2068 LIST_INIT(&pagedep->pd_pendinghd); 2069 for (i = 0; i < DAHASHSZ; i++) 2070 LIST_INIT(&pagedep->pd_diraddhd[i]); 2071 LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash); 2072 WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list); 2073 *pagedeppp = pagedep; 2074 return (0); 2075} 2076 2077/* 2078 * Structures and routines associated with inodedep caching. 2079 */ 2080#define INODEDEP_HASH(ump, inum) \ 2081 (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size]) 2082 2083static int 2084inodedep_find(inodedephd, inum, inodedeppp) 2085 struct inodedep_hashhead *inodedephd; 2086 ino_t inum; 2087 struct inodedep **inodedeppp; 2088{ 2089 struct inodedep *inodedep; 2090 2091 LIST_FOREACH(inodedep, inodedephd, id_hash) 2092 if (inum == inodedep->id_ino) 2093 break; 2094 if (inodedep) { 2095 *inodedeppp = inodedep; 2096 return (1); 2097 } 2098 *inodedeppp = NULL; 2099 2100 return (0); 2101} 2102/* 2103 * Look up an inodedep. Return 1 if found, 0 if not found. 2104 * If not found, allocate if DEPALLOC flag is passed. 2105 * Found or allocated entry is returned in inodedeppp. 2106 * This routine must be called with splbio interrupts blocked. 2107 */ 2108static int 2109inodedep_lookup(mp, inum, flags, inodedeppp) 2110 struct mount *mp; 2111 ino_t inum; 2112 int flags; 2113 struct inodedep **inodedeppp; 2114{ 2115 struct inodedep *inodedep; 2116 struct inodedep_hashhead *inodedephd; 2117 struct ufsmount *ump; 2118 struct fs *fs; 2119 2120 ump = VFSTOUFS(mp); 2121 LOCK_OWNED(ump); 2122 fs = ump->um_fs; 2123 inodedephd = INODEDEP_HASH(ump, inum); 2124 2125 if (inodedep_find(inodedephd, inum, inodedeppp)) 2126 return (1); 2127 if ((flags & DEPALLOC) == 0) 2128 return (0); 2129 /* 2130 * If we are over our limit, try to improve the situation. 
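 * "Improving the situation" here means asking request_cleanup() to
 * flush inode dependencies (FLUSH_INODES) before allocating yet
 * another inodedep; NODELAY callers skip this because they cannot
 * afford to wait on background work.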
2131 */ 2132 if (dep_current[D_INODEDEP] > max_softdeps && (flags & NODELAY) == 0) 2133 request_cleanup(mp, FLUSH_INODES); 2134 FREE_LOCK(ump); 2135 inodedep = malloc(sizeof(struct inodedep), 2136 M_INODEDEP, M_SOFTDEP_FLAGS); 2137 workitem_alloc(&inodedep->id_list, D_INODEDEP, mp); 2138 ACQUIRE_LOCK(ump); 2139 if (inodedep_find(inodedephd, inum, inodedeppp)) { 2140 WORKITEM_FREE(inodedep, D_INODEDEP); 2141 return (1); 2142 } 2143 inodedep->id_fs = fs; 2144 inodedep->id_ino = inum; 2145 inodedep->id_state = ALLCOMPLETE; 2146 inodedep->id_nlinkdelta = 0; 2147 inodedep->id_savedino1 = NULL; 2148 inodedep->id_savedsize = -1; 2149 inodedep->id_savedextsize = -1; 2150 inodedep->id_savednlink = -1; 2151 inodedep->id_bmsafemap = NULL; 2152 inodedep->id_mkdiradd = NULL; 2153 LIST_INIT(&inodedep->id_dirremhd); 2154 LIST_INIT(&inodedep->id_pendinghd); 2155 LIST_INIT(&inodedep->id_inowait); 2156 LIST_INIT(&inodedep->id_bufwait); 2157 TAILQ_INIT(&inodedep->id_inoreflst); 2158 TAILQ_INIT(&inodedep->id_inoupdt); 2159 TAILQ_INIT(&inodedep->id_newinoupdt); 2160 TAILQ_INIT(&inodedep->id_extupdt); 2161 TAILQ_INIT(&inodedep->id_newextupdt); 2162 TAILQ_INIT(&inodedep->id_freeblklst); 2163 LIST_INSERT_HEAD(inodedephd, inodedep, id_hash); 2164 *inodedeppp = inodedep; 2165 return (0); 2166} 2167 2168/* 2169 * Structures and routines associated with newblk caching. 2170 */ 2171#define NEWBLK_HASH(ump, inum) \ 2172 (&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size]) 2173 2174static int 2175newblk_find(newblkhd, newblkno, flags, newblkpp) 2176 struct newblk_hashhead *newblkhd; 2177 ufs2_daddr_t newblkno; 2178 int flags; 2179 struct newblk **newblkpp; 2180{ 2181 struct newblk *newblk; 2182 2183 LIST_FOREACH(newblk, newblkhd, nb_hash) { 2184 if (newblkno != newblk->nb_newblkno) 2185 continue; 2186 /* 2187 * If we're creating a new dependency don't match those that 2188 * have already been converted to allocdirects. This is for 2189 * a frag extend. 2190 */ 2191 if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK) 2192 continue; 2193 break; 2194 } 2195 if (newblk) { 2196 *newblkpp = newblk; 2197 return (1); 2198 } 2199 *newblkpp = NULL; 2200 return (0); 2201} 2202 2203/* 2204 * Look up a newblk. Return 1 if found, 0 if not found. 2205 * If not found, allocate if DEPALLOC flag is passed. 2206 * Found or allocated entry is returned in newblkpp. 
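 *
 * All of these lookup routines share the same allocation discipline:
 * the softdep lock is dropped around malloc() (which may sleep), so
 * the hash must be searched again after the lock is reacquired in
 * case another thread inserted the same key in the interim. Sketch
 * of the pattern used below (illustrative):
 *
 *	if (newblk_find(newblkhd, newblkno, flags, newblkpp))
 *		return (1);
 *	FREE_LOCK(ump);
 *	newblk = malloc(...);
 *	ACQUIRE_LOCK(ump);
 *	if (newblk_find(newblkhd, newblkno, flags, newblkpp)) {
 *		WORKITEM_FREE(newblk, D_NEWBLK);
 *		return (1);
 *	}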
2207 */ 2208static int 2209newblk_lookup(mp, newblkno, flags, newblkpp) 2210 struct mount *mp; 2211 ufs2_daddr_t newblkno; 2212 int flags; 2213 struct newblk **newblkpp; 2214{ 2215 struct newblk *newblk; 2216 struct newblk_hashhead *newblkhd; 2217 struct ufsmount *ump; 2218 2219 ump = VFSTOUFS(mp); 2220 LOCK_OWNED(ump); 2221 newblkhd = NEWBLK_HASH(ump, newblkno); 2222 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) 2223 return (1); 2224 if ((flags & DEPALLOC) == 0) 2225 return (0); 2226 FREE_LOCK(ump); 2227 newblk = malloc(sizeof(union allblk), M_NEWBLK, 2228 M_SOFTDEP_FLAGS | M_ZERO); 2229 workitem_alloc(&newblk->nb_list, D_NEWBLK, mp); 2230 ACQUIRE_LOCK(ump); 2231 if (newblk_find(newblkhd, newblkno, flags, newblkpp)) { 2232 WORKITEM_FREE(newblk, D_NEWBLK); 2233 return (1); 2234 } 2235 newblk->nb_freefrag = NULL; 2236 LIST_INIT(&newblk->nb_indirdeps); 2237 LIST_INIT(&newblk->nb_newdirblk); 2238 LIST_INIT(&newblk->nb_jwork); 2239 newblk->nb_state = ATTACHED; 2240 newblk->nb_newblkno = newblkno; 2241 LIST_INSERT_HEAD(newblkhd, newblk, nb_hash); 2242 *newblkpp = newblk; 2243 return (0); 2244} 2245 2246/* 2247 * Structures and routines associated with freed indirect block caching. 2248 */ 2249#define INDIR_HASH(ump, blkno) \ 2250 (&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size]) 2251 2252/* 2253 * Lookup an indirect block in the indir hash table. The freework is 2254 * removed and potentially freed. The caller must do a blocking journal 2255 * write before writing to the blkno. 2256 */ 2257static int 2258indirblk_lookup(mp, blkno) 2259 struct mount *mp; 2260 ufs2_daddr_t blkno; 2261{ 2262 struct freework *freework; 2263 struct indir_hashhead *wkhd; 2264 struct ufsmount *ump; 2265 2266 ump = VFSTOUFS(mp); 2267 wkhd = INDIR_HASH(ump, blkno); 2268 TAILQ_FOREACH(freework, wkhd, fw_next) { 2269 if (freework->fw_blkno != blkno) 2270 continue; 2271 indirblk_remove(freework); 2272 return (1); 2273 } 2274 return (0); 2275} 2276 2277/* 2278 * Insert an indirect block represented by freework into the indirblk 2279 * hash table so that it may prevent the block from being re-used prior 2280 * to the journal being written. 2281 */ 2282static void 2283indirblk_insert(freework) 2284 struct freework *freework; 2285{ 2286 struct jblocks *jblocks; 2287 struct jseg *jseg; 2288 struct ufsmount *ump; 2289 2290 ump = VFSTOUFS(freework->fw_list.wk_mp); 2291 jblocks = ump->softdep_jblocks; 2292 jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst); 2293 if (jseg == NULL) 2294 return; 2295 2296 LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs); 2297 TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework, 2298 fw_next); 2299 freework->fw_state &= ~DEPCOMPLETE; 2300} 2301 2302static void 2303indirblk_remove(freework) 2304 struct freework *freework; 2305{ 2306 struct ufsmount *ump; 2307 2308 ump = VFSTOUFS(freework->fw_list.wk_mp); 2309 LIST_REMOVE(freework, fw_segs); 2310 TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next); 2311 freework->fw_state |= DEPCOMPLETE; 2312 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 2313 WORKITEM_FREE(freework, D_FREEWORK); 2314} 2315 2316/* 2317 * Executed during filesystem system initialization before 2318 * mounting any filesystems. 
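 *
 * Initialization amounts to sizing max_softdeps from desiredvnodes,
 * installing the four bioops hooks through which the buffer cache
 * calls back into soft updates (I/O initiation and completion,
 * dependency deallocation and counting), and arming the flush callout.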
2319 */ 2320void 2321softdep_initialize() 2322{ 2323 2324 max_softdeps = desiredvnodes * 4; 2325 2326 /* initialise bioops hack */ 2327 bioops.io_start = softdep_disk_io_initiation; 2328 bioops.io_complete = softdep_disk_write_complete; 2329 bioops.io_deallocate = softdep_deallocate_dependencies; 2330 bioops.io_countdeps = softdep_count_dependencies; 2331 2332 /* Initialize the callout with an mtx. */ 2333 callout_init_mtx(&softdep_callout, &lk, 0); 2334} 2335 2336/* 2337 * Executed after all filesystems have been unmounted during 2338 * filesystem module unload. 2339 */ 2340void 2341softdep_uninitialize() 2342{ 2343 2344 /* clear bioops hack */ 2345 bioops.io_start = NULL; 2346 bioops.io_complete = NULL; 2347 bioops.io_deallocate = NULL; 2348 bioops.io_countdeps = NULL; 2349 2350 callout_drain(&softdep_callout); 2351} 2352 2353/* 2354 * Called at mount time to notify the dependency code that a 2355 * filesystem wishes to use it. 2356 */ 2357int 2358softdep_mount(devvp, mp, fs, cred) 2359 struct vnode *devvp; 2360 struct mount *mp; 2361 struct fs *fs; 2362 struct ucred *cred; 2363{ 2364 struct csum_total cstotal; 2365 struct mount_softdeps *sdp; 2366 struct ufsmount *ump; 2367 struct cg *cgp; 2368 struct buf *bp; 2369 int i, error, cyl; 2370 2371 sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA, 2372 M_WAITOK | M_ZERO); 2373 MNT_ILOCK(mp); 2374 mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP; 2375 if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) { 2376 mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) | 2377 MNTK_SOFTDEP | MNTK_NOASYNC; 2378 } 2379 ump = VFSTOUFS(mp); 2380 ump->um_softdep = sdp; 2381 MNT_IUNLOCK(mp); 2382 LOCK_PTR(ump) = &lk; 2383 LIST_INIT(&ump->softdep_workitem_pending); 2384 LIST_INIT(&ump->softdep_journal_pending); 2385 TAILQ_INIT(&ump->softdep_unlinked); 2386 LIST_INIT(&ump->softdep_dirtycg); 2387 ump->softdep_worklist_tail = NULL; 2388 ump->softdep_on_worklist = 0; 2389 ump->softdep_deps = 0; 2390 LIST_INIT(&ump->softdep_mkdirlisthd); 2391 ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, 2392 &ump->pagedep_hash_size); 2393 ump->pagedep_nextclean = 0; 2394 ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, 2395 &ump->inodedep_hash_size); 2396 ump->inodedep_nextclean = 0; 2397 ump->newblk_hashtbl = hashinit(max_softdeps / 2, M_NEWBLK, 2398 &ump->newblk_hash_size); 2399 ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, 2400 &ump->bmsafemap_hash_size); 2401 i = 1 << (ffs(desiredvnodes / 10) - 1); 2402 ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead), 2403 M_FREEWORK, M_WAITOK); 2404 ump->indir_hash_size = i - 1; 2405 for (i = 0; i <= ump->indir_hash_size; i++) 2406 TAILQ_INIT(&ump->indir_hashtbl[i]); 2407 if ((fs->fs_flags & FS_SUJ) && 2408 (error = journal_mount(mp, fs, cred)) != 0) { 2409 printf("Failed to start journal: %d\n", error); 2410 softdep_unmount(mp); 2411 return (error); 2412 } 2413 atomic_add_int(&stat_softdep_mounts, 1); 2414 /* 2415 * When doing soft updates, the counters in the 2416 * superblock may have gotten out of sync. Recomputation 2417 * can take a long time and can be deferred for background 2418 * fsck. However, the old behavior of scanning the cylinder 2419 * groups and recalculating them at mount time is available 2420 * by setting vfs.ffs.compute_summary_at_mount to one. 
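 *
 * For example, assuming the standard sysctl(8) interface:
 *
 *	sysctl vfs.ffs.compute_summary_at_mount=1
 *
 * enables the cylinder group scan below for subsequent mounts of
 * unclean filesystems.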
2421 */ 2422 if (compute_summary_at_mount == 0 || fs->fs_clean != 0) 2423 return (0); 2424 bzero(&cstotal, sizeof cstotal); 2425 for (cyl = 0; cyl < fs->fs_ncg; cyl++) { 2426 if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)), 2427 fs->fs_cgsize, cred, &bp)) != 0) { 2428 brelse(bp); 2429 softdep_unmount(mp); 2430 return (error); 2431 } 2432 cgp = (struct cg *)bp->b_data; 2433 cstotal.cs_nffree += cgp->cg_cs.cs_nffree; 2434 cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree; 2435 cstotal.cs_nifree += cgp->cg_cs.cs_nifree; 2436 cstotal.cs_ndir += cgp->cg_cs.cs_ndir; 2437 fs->fs_cs(fs, cyl) = cgp->cg_cs; 2438 brelse(bp); 2439 } 2440#ifdef DEBUG 2441 if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal)) 2442 printf("%s: superblock summary recomputed\n", fs->fs_fsmnt); 2443#endif 2444 bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal); 2445 return (0); 2446} 2447 2448void 2449softdep_unmount(mp) 2450 struct mount *mp; 2451{ 2452 struct ufsmount *ump; 2453#ifdef INVARIANTS 2454 int i; 2455#endif 2456 2457 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 2458 ("softdep_unmount called on non-softdep filesystem")); 2459 ump = VFSTOUFS(mp); 2460 MNT_ILOCK(mp); 2461 mp->mnt_flag &= ~MNT_SOFTDEP; 2462 if (MOUNTEDSUJ(mp) == 0) { 2463 MNT_IUNLOCK(mp); 2464 } else { 2465 mp->mnt_flag &= ~MNT_SUJ; 2466 MNT_IUNLOCK(mp); 2467 journal_unmount(ump); 2468 } 2469 atomic_subtract_int(&stat_softdep_mounts, 1); 2470 hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size); 2471 hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size); 2472 hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size); 2473 hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP, 2474 ump->bmsafemap_hash_size); 2475 free(ump->indir_hashtbl, M_FREEWORK); 2476#ifdef INVARIANTS 2477 for (i = 0; i <= D_LAST; i++) 2478 KASSERT(ump->softdep_curdeps[i] == 0, 2479 ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt, 2480 TYPENAME(i), ump->softdep_curdeps[i])); 2481#endif 2482 free(ump->um_softdep, M_MOUNTDATA); 2483} 2484 2485static struct jblocks * 2486jblocks_create(void) 2487{ 2488 struct jblocks *jblocks; 2489 2490 jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO); 2491 TAILQ_INIT(&jblocks->jb_segs); 2492 jblocks->jb_avail = 10; 2493 jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2494 M_JBLOCKS, M_WAITOK | M_ZERO); 2495 2496 return (jblocks); 2497} 2498 2499static ufs2_daddr_t 2500jblocks_alloc(jblocks, bytes, actual) 2501 struct jblocks *jblocks; 2502 int bytes; 2503 int *actual; 2504{ 2505 ufs2_daddr_t daddr; 2506 struct jextent *jext; 2507 int freecnt; 2508 int blocks; 2509 2510 blocks = bytes / DEV_BSIZE; 2511 jext = &jblocks->jb_extent[jblocks->jb_head]; 2512 freecnt = jext->je_blocks - jblocks->jb_off; 2513 if (freecnt == 0) { 2514 jblocks->jb_off = 0; 2515 if (++jblocks->jb_head > jblocks->jb_used) 2516 jblocks->jb_head = 0; 2517 jext = &jblocks->jb_extent[jblocks->jb_head]; 2518 freecnt = jext->je_blocks; 2519 } 2520 if (freecnt > blocks) 2521 freecnt = blocks; 2522 *actual = freecnt * DEV_BSIZE; 2523 daddr = jext->je_daddr + jblocks->jb_off; 2524 jblocks->jb_off += freecnt; 2525 jblocks->jb_free -= freecnt; 2526 2527 return (daddr); 2528} 2529 2530static void 2531jblocks_free(jblocks, mp, bytes) 2532 struct jblocks *jblocks; 2533 struct mount *mp; 2534 int bytes; 2535{ 2536 2537 LOCK_OWNED(VFSTOUFS(mp)); 2538 jblocks->jb_free += bytes / DEV_BSIZE; 2539 if (jblocks->jb_suspended) 2540 worklist_speedup(mp); 2541 wakeup(jblocks); 2542} 2543 2544static void 2545jblocks_destroy(jblocks) 
2546 struct jblocks *jblocks; 2547{ 2548 2549 if (jblocks->jb_extent) 2550 free(jblocks->jb_extent, M_JBLOCKS); 2551 free(jblocks, M_JBLOCKS); 2552} 2553 2554static void 2555jblocks_add(jblocks, daddr, blocks) 2556 struct jblocks *jblocks; 2557 ufs2_daddr_t daddr; 2558 int blocks; 2559{ 2560 struct jextent *jext; 2561 2562 jblocks->jb_blocks += blocks; 2563 jblocks->jb_free += blocks; 2564 jext = &jblocks->jb_extent[jblocks->jb_used]; 2565 /* Adding the first block. */ 2566 if (jext->je_daddr == 0) { 2567 jext->je_daddr = daddr; 2568 jext->je_blocks = blocks; 2569 return; 2570 } 2571 /* Extending the last extent. */ 2572 if (jext->je_daddr + jext->je_blocks == daddr) { 2573 jext->je_blocks += blocks; 2574 return; 2575 } 2576 /* Adding a new extent. */ 2577 if (++jblocks->jb_used == jblocks->jb_avail) { 2578 jblocks->jb_avail *= 2; 2579 jext = malloc(sizeof(struct jextent) * jblocks->jb_avail, 2580 M_JBLOCKS, M_WAITOK | M_ZERO); 2581 memcpy(jext, jblocks->jb_extent, 2582 sizeof(struct jextent) * jblocks->jb_used); 2583 free(jblocks->jb_extent, M_JBLOCKS); 2584 jblocks->jb_extent = jext; 2585 } 2586 jext = &jblocks->jb_extent[jblocks->jb_used]; 2587 jext->je_daddr = daddr; 2588 jext->je_blocks = blocks; 2589 return; 2590} 2591 2592int 2593softdep_journal_lookup(mp, vpp) 2594 struct mount *mp; 2595 struct vnode **vpp; 2596{ 2597 struct componentname cnp; 2598 struct vnode *dvp; 2599 ino_t sujournal; 2600 int error; 2601 2602 error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp); 2603 if (error) 2604 return (error); 2605 bzero(&cnp, sizeof(cnp)); 2606 cnp.cn_nameiop = LOOKUP; 2607 cnp.cn_flags = ISLASTCN; 2608 cnp.cn_thread = curthread; 2609 cnp.cn_cred = curthread->td_ucred; 2610 cnp.cn_pnbuf = SUJ_FILE; 2611 cnp.cn_nameptr = SUJ_FILE; 2612 cnp.cn_namelen = strlen(SUJ_FILE); 2613 error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal); 2614 vput(dvp); 2615 if (error != 0) 2616 return (error); 2617 error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp); 2618 return (error); 2619} 2620 2621/* 2622 * Open and verify the journal file. 2623 */ 2624static int 2625journal_mount(mp, fs, cred) 2626 struct mount *mp; 2627 struct fs *fs; 2628 struct ucred *cred; 2629{ 2630 struct jblocks *jblocks; 2631 struct ufsmount *ump; 2632 struct vnode *vp; 2633 struct inode *ip; 2634 ufs2_daddr_t blkno; 2635 int bcount; 2636 int error; 2637 int i; 2638 2639 ump = VFSTOUFS(mp); 2640 ump->softdep_journal_tail = NULL; 2641 ump->softdep_on_journal = 0; 2642 ump->softdep_accdeps = 0; 2643 ump->softdep_req = 0; 2644 ump->softdep_jblocks = NULL; 2645 error = softdep_journal_lookup(mp, &vp); 2646 if (error != 0) { 2647 printf("Failed to find journal. Use tunefs to create one\n"); 2648 return (error); 2649 } 2650 ip = VTOI(vp); 2651 if (ip->i_size < SUJ_MIN) { 2652 error = ENOSPC; 2653 goto out; 2654 } 2655 bcount = lblkno(fs, ip->i_size); /* Only use whole blocks. */ 2656 jblocks = jblocks_create(); 2657 for (i = 0; i < bcount; i++) { 2658 error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL); 2659 if (error) 2660 break; 2661 jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag)); 2662 } 2663 if (error) { 2664 jblocks_destroy(jblocks); 2665 goto out; 2666 } 2667 jblocks->jb_low = jblocks->jb_free / 3; /* Reserve 33%. */ 2668 jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. 
*/
2669	ump->softdep_jblocks = jblocks;
2670out:
2671	if (error == 0) {
2672		MNT_ILOCK(mp);
2673		mp->mnt_flag |= MNT_SUJ;
2674		mp->mnt_flag &= ~MNT_SOFTDEP;
2675		MNT_IUNLOCK(mp);
2676		/*
2677		 * Only validate the journal contents if the
2678		 * filesystem is clean, otherwise we write the logs
2679		 * but they'll never be used. If the filesystem was
2680		 * still dirty when we mounted it the journal is
2681		 * invalid and a new journal can only be valid if it
2682		 * starts from a clean mount.
2683		 */
2684		if (fs->fs_clean) {
2685			DIP_SET(ip, i_modrev, fs->fs_mtime);
2686			ip->i_flags |= IN_MODIFIED;
2687			ffs_update(vp, 1);
2688		}
2689	}
2690	vput(vp);
2691	return (error);
2692}
2693
2694static void
2695journal_unmount(ump)
2696	struct ufsmount *ump;
2697{
2698
2699	if (ump->softdep_jblocks)
2700		jblocks_destroy(ump->softdep_jblocks);
2701	ump->softdep_jblocks = NULL;
2702}
2703
2704/*
2705 * Called when a journal record is ready to be written. Space is allocated
2706 * and the journal entry is created when the journal is flushed to stable
2707 * store.
2708 */
2709static void
2710add_to_journal(wk)
2711	struct worklist *wk;
2712{
2713	struct ufsmount *ump;
2714
2715	ump = VFSTOUFS(wk->wk_mp);
2716	LOCK_OWNED(ump);
2717	if (wk->wk_state & ONWORKLIST)
2718		panic("add_to_journal: %s(0x%X) already on list",
2719		    TYPENAME(wk->wk_type), wk->wk_state);
2720	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2721	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2722		ump->softdep_jblocks->jb_age = ticks;
2723		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2724	} else
2725		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2726	ump->softdep_journal_tail = wk;
2727	ump->softdep_on_journal += 1;
2728}
2729
2730/*
2731 * Remove an arbitrary item from the journal worklist, maintaining the
2732 * tail pointer. This happens when a new operation obviates the need to
2733 * journal an old operation.
2734 */
2735static void
2736remove_from_journal(wk)
2737	struct worklist *wk;
2738{
2739	struct ufsmount *ump;
2740
2741	ump = VFSTOUFS(wk->wk_mp);
2742	LOCK_OWNED(ump);
2743#ifdef SUJ_DEBUG
2744	{
2745		struct worklist *wkn;
2746
2747		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2748			if (wkn == wk)
2749				break;
2750		if (wkn == NULL)
2751			panic("remove_from_journal: %p is not in journal", wk);
2752	}
2753#endif
2754	/*
2755	 * We emulate a TAILQ to save space in most structures which do not
2756	 * require TAILQ semantics. Here we must update the tail position
2757	 * when removing the tail which is not the final entry. This works
2758	 * only if the worklist linkage is at the beginning of the structure.
2759	 */
2760	if (ump->softdep_journal_tail == wk)
2761		ump->softdep_journal_tail =
2762		    (struct worklist *)wk->wk_list.le_prev;
2763
2764	WORKLIST_REMOVE(wk);
2765	ump->softdep_on_journal -= 1;
2766}
2767
2768/*
2769 * Check for journal space as well as dependency limits so the prelink
2770 * code can throttle both journaled and non-journaled filesystems.
2771 * Threshold is 0 for low and 1 for min.
2772 */
2773static int
2774journal_space(ump, thresh)
2775	struct ufsmount *ump;
2776	int thresh;
2777{
2778	struct jblocks *jblocks;
2779	int limit, avail;
2780
2781	jblocks = ump->softdep_jblocks;
2782	if (jblocks == NULL)
2783		return (1);
2784	/*
2785	 * We use a tighter restriction here to prevent request_cleanup()
2786	 * calls running in other threads from running into locks we
2787	 * currently hold. We have to be over the limit and our filesystem
2788	 * has to be responsible for more than our share of that usage.
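	 *
	 * Worked example (illustrative numbers, not defaults): if
	 * max_softdeps were 100000, the limit below would be 90000; with
	 * four softdep mounts active, this mount is denied journal space
	 * only when the system as a whole is over the limit and this
	 * mount alone holds more than 90000 / 4 = 22500 inodedeps.
	 * Otherwise available space is the free journal blocks minus the
	 * blocks the already-queued records would consume.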
2789 */ 2790 limit = (max_softdeps / 10) * 9; 2791 if (dep_current[D_INODEDEP] > limit && 2792 ump->softdep_curdeps[D_INODEDEP] > limit / stat_softdep_mounts) 2793 return (0); 2794 if (thresh) 2795 thresh = jblocks->jb_min; 2796 else 2797 thresh = jblocks->jb_low; 2798 avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE; 2799 avail = jblocks->jb_free - avail; 2800 2801 return (avail > thresh); 2802} 2803 2804static void 2805journal_suspend(ump) 2806 struct ufsmount *ump; 2807{ 2808 struct jblocks *jblocks; 2809 struct mount *mp; 2810 2811 mp = UFSTOVFS(ump); 2812 jblocks = ump->softdep_jblocks; 2813 MNT_ILOCK(mp); 2814 if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { 2815 stat_journal_min++; 2816 mp->mnt_kern_flag |= MNTK_SUSPEND; 2817 mp->mnt_susp_owner = FIRST_THREAD_IN_PROC(softdepproc); 2818 } 2819 jblocks->jb_suspended = 1; 2820 MNT_IUNLOCK(mp); 2821} 2822 2823static int 2824journal_unsuspend(struct ufsmount *ump) 2825{ 2826 struct jblocks *jblocks; 2827 struct mount *mp; 2828 2829 mp = UFSTOVFS(ump); 2830 jblocks = ump->softdep_jblocks; 2831 2832 if (jblocks != NULL && jblocks->jb_suspended && 2833 journal_space(ump, jblocks->jb_min)) { 2834 jblocks->jb_suspended = 0; 2835 FREE_LOCK(ump); 2836 mp->mnt_susp_owner = curthread; 2837 vfs_write_resume(mp, 0); 2838 ACQUIRE_LOCK(ump); 2839 return (1); 2840 } 2841 return (0); 2842} 2843 2844/* 2845 * Called before any allocation function to be certain that there is 2846 * sufficient space in the journal prior to creating any new records. 2847 * Since in the case of block allocation we may have multiple locked 2848 * buffers at the time of the actual allocation we can not block 2849 * when the journal records are created. Doing so would create a deadlock 2850 * if any of these buffers needed to be flushed to reclaim space. Instead 2851 * we require a sufficiently large amount of available space such that 2852 * each thread in the system could have passed this allocation check and 2853 * still have sufficient free space. With 20% of a minimum journal size 2854 * of 1MB we have 6553 records available. 2855 */ 2856int 2857softdep_prealloc(vp, waitok) 2858 struct vnode *vp; 2859 int waitok; 2860{ 2861 struct ufsmount *ump; 2862 2863 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 2864 ("softdep_prealloc called on non-softdep filesystem")); 2865 /* 2866 * Nothing to do if we are not running journaled soft updates. 2867 * If we currently hold the snapshot lock, we must avoid handling 2868 * other resources that could cause deadlock. 2869 */ 2870 if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp))) 2871 return (0); 2872 ump = VFSTOUFS(vp->v_mount); 2873 ACQUIRE_LOCK(ump); 2874 if (journal_space(ump, 0)) { 2875 FREE_LOCK(ump); 2876 return (0); 2877 } 2878 stat_journal_low++; 2879 FREE_LOCK(ump); 2880 if (waitok == MNT_NOWAIT) 2881 return (ENOSPC); 2882 /* 2883 * Attempt to sync this vnode once to flush any journal 2884 * work attached to it. 2885 */ 2886 if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0) 2887 ffs_syncvnode(vp, waitok, 0); 2888 ACQUIRE_LOCK(ump); 2889 process_removes(vp); 2890 process_truncates(vp); 2891 if (journal_space(ump, 0) == 0) { 2892 softdep_speedup(); 2893 if (journal_space(ump, 1) == 0) 2894 journal_suspend(ump); 2895 } 2896 FREE_LOCK(ump); 2897 2898 return (0); 2899} 2900 2901/* 2902 * Before adjusting a link count on a vnode verify that we have sufficient 2903 * journal space. 
If not, process operations that depend on the currently 2904 * locked pair of vnodes to try to flush space as the syncer, buf daemon, 2905 * and softdep flush threads can not acquire these locks to reclaim space. 2906 */ 2907static void 2908softdep_prelink(dvp, vp) 2909 struct vnode *dvp; 2910 struct vnode *vp; 2911{ 2912 struct ufsmount *ump; 2913 2914 ump = VFSTOUFS(dvp->v_mount); 2915 LOCK_OWNED(ump); 2916 /* 2917 * Nothing to do if we have sufficient journal space. 2918 * If we currently hold the snapshot lock, we must avoid 2919 * handling other resources that could cause deadlock. 2920 */ 2921 if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp)))) 2922 return; 2923 stat_journal_low++; 2924 FREE_LOCK(ump); 2925 if (vp) 2926 ffs_syncvnode(vp, MNT_NOWAIT, 0); 2927 ffs_syncvnode(dvp, MNT_WAIT, 0); 2928 ACQUIRE_LOCK(ump); 2929 /* Process vp before dvp as it may create .. removes. */ 2930 if (vp) { 2931 process_removes(vp); 2932 process_truncates(vp); 2933 } 2934 process_removes(dvp); 2935 process_truncates(dvp); 2936 softdep_speedup(); 2937 process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT); 2938 if (journal_space(ump, 0) == 0) { 2939 softdep_speedup(); 2940 if (journal_space(ump, 1) == 0) 2941 journal_suspend(ump); 2942 } 2943} 2944 2945static void 2946jseg_write(ump, jseg, data) 2947 struct ufsmount *ump; 2948 struct jseg *jseg; 2949 uint8_t *data; 2950{ 2951 struct jsegrec *rec; 2952 2953 rec = (struct jsegrec *)data; 2954 rec->jsr_seq = jseg->js_seq; 2955 rec->jsr_oldest = jseg->js_oldseq; 2956 rec->jsr_cnt = jseg->js_cnt; 2957 rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize; 2958 rec->jsr_crc = 0; 2959 rec->jsr_time = ump->um_fs->fs_mtime; 2960} 2961 2962static inline void 2963inoref_write(inoref, jseg, rec) 2964 struct inoref *inoref; 2965 struct jseg *jseg; 2966 struct jrefrec *rec; 2967{ 2968 2969 inoref->if_jsegdep->jd_seg = jseg; 2970 rec->jr_ino = inoref->if_ino; 2971 rec->jr_parent = inoref->if_parent; 2972 rec->jr_nlink = inoref->if_nlink; 2973 rec->jr_mode = inoref->if_mode; 2974 rec->jr_diroff = inoref->if_diroff; 2975} 2976 2977static void 2978jaddref_write(jaddref, jseg, data) 2979 struct jaddref *jaddref; 2980 struct jseg *jseg; 2981 uint8_t *data; 2982{ 2983 struct jrefrec *rec; 2984 2985 rec = (struct jrefrec *)data; 2986 rec->jr_op = JOP_ADDREF; 2987 inoref_write(&jaddref->ja_ref, jseg, rec); 2988} 2989 2990static void 2991jremref_write(jremref, jseg, data) 2992 struct jremref *jremref; 2993 struct jseg *jseg; 2994 uint8_t *data; 2995{ 2996 struct jrefrec *rec; 2997 2998 rec = (struct jrefrec *)data; 2999 rec->jr_op = JOP_REMREF; 3000 inoref_write(&jremref->jr_ref, jseg, rec); 3001} 3002 3003static void 3004jmvref_write(jmvref, jseg, data) 3005 struct jmvref *jmvref; 3006 struct jseg *jseg; 3007 uint8_t *data; 3008{ 3009 struct jmvrec *rec; 3010 3011 rec = (struct jmvrec *)data; 3012 rec->jm_op = JOP_MVREF; 3013 rec->jm_ino = jmvref->jm_ino; 3014 rec->jm_parent = jmvref->jm_parent; 3015 rec->jm_oldoff = jmvref->jm_oldoff; 3016 rec->jm_newoff = jmvref->jm_newoff; 3017} 3018 3019static void 3020jnewblk_write(jnewblk, jseg, data) 3021 struct jnewblk *jnewblk; 3022 struct jseg *jseg; 3023 uint8_t *data; 3024{ 3025 struct jblkrec *rec; 3026 3027 jnewblk->jn_jsegdep->jd_seg = jseg; 3028 rec = (struct jblkrec *)data; 3029 rec->jb_op = JOP_NEWBLK; 3030 rec->jb_ino = jnewblk->jn_ino; 3031 rec->jb_blkno = jnewblk->jn_blkno; 3032 rec->jb_lbn = jnewblk->jn_lbn; 3033 rec->jb_frags = jnewblk->jn_frags; 3034 rec->jb_oldfrags = jnewblk->jn_oldfrags; 3035} 3036 
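/*
 * Note on the j*_write() helpers in this region: each one marshals a
 * single in-memory dependency into one fixed-size JREC_SIZE record at
 * "data", and those whose journal space must later be reclaimed first
 * point their attached jsegdep at the segment being written. A hedged
 * sketch of the common shape (field names illustrative, not an
 * additional record type in this file):
 *
 *	dep->d_jsegdep->jd_seg = jseg;
 *	rec = (struct jblkrec *)data;
 *	rec->jb_op = JOP_...;
 *	... remaining fields copied verbatim from the dependency ...
 */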
3037static void
3038jfreeblk_write(jfreeblk, jseg, data)
3039	struct jfreeblk *jfreeblk;
3040	struct jseg *jseg;
3041	uint8_t *data;
3042{
3043	struct jblkrec *rec;
3044
3045	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3046	rec = (struct jblkrec *)data;
3047	rec->jb_op = JOP_FREEBLK;
3048	rec->jb_ino = jfreeblk->jf_ino;
3049	rec->jb_blkno = jfreeblk->jf_blkno;
3050	rec->jb_lbn = jfreeblk->jf_lbn;
3051	rec->jb_frags = jfreeblk->jf_frags;
3052	rec->jb_oldfrags = 0;
3053}
3054
3055static void
3056jfreefrag_write(jfreefrag, jseg, data)
3057	struct jfreefrag *jfreefrag;
3058	struct jseg *jseg;
3059	uint8_t *data;
3060{
3061	struct jblkrec *rec;
3062
3063	jfreefrag->fr_jsegdep->jd_seg = jseg;
3064	rec = (struct jblkrec *)data;
3065	rec->jb_op = JOP_FREEBLK;
3066	rec->jb_ino = jfreefrag->fr_ino;
3067	rec->jb_blkno = jfreefrag->fr_blkno;
3068	rec->jb_lbn = jfreefrag->fr_lbn;
3069	rec->jb_frags = jfreefrag->fr_frags;
3070	rec->jb_oldfrags = 0;
3071}
3072
3073static void
3074jtrunc_write(jtrunc, jseg, data)
3075	struct jtrunc *jtrunc;
3076	struct jseg *jseg;
3077	uint8_t *data;
3078{
3079	struct jtrncrec *rec;
3080
3081	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3082	rec = (struct jtrncrec *)data;
3083	rec->jt_op = JOP_TRUNC;
3084	rec->jt_ino = jtrunc->jt_ino;
3085	rec->jt_size = jtrunc->jt_size;
3086	rec->jt_extsize = jtrunc->jt_extsize;
3087}
3088
3089static void
3090jfsync_write(jfsync, jseg, data)
3091	struct jfsync *jfsync;
3092	struct jseg *jseg;
3093	uint8_t *data;
3094{
3095	struct jtrncrec *rec;
3096
3097	rec = (struct jtrncrec *)data;
3098	rec->jt_op = JOP_SYNC;
3099	rec->jt_ino = jfsync->jfs_ino;
3100	rec->jt_size = jfsync->jfs_size;
3101	rec->jt_extsize = jfsync->jfs_extsize;
3102}
3103
3104static void
3105softdep_flushjournal(mp)
3106	struct mount *mp;
3107{
3108	struct jblocks *jblocks;
3109	struct ufsmount *ump;
3110
3111	if (MOUNTEDSUJ(mp) == 0)
3112		return;
3113	ump = VFSTOUFS(mp);
3114	jblocks = ump->softdep_jblocks;
3115	ACQUIRE_LOCK(ump);
3116	while (ump->softdep_on_journal) {
3117		jblocks->jb_needseg = 1;
3118		softdep_process_journal(mp, NULL, MNT_WAIT);
3119	}
3120	FREE_LOCK(ump);
3121}
3122
3123static void softdep_synchronize_completed(struct bio *);
3124static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3125
3126static void
3127softdep_synchronize_completed(bp)
3128	struct bio *bp;
3129{
3130	struct jseg *oldest;
3131	struct jseg *jseg;
3132	struct ufsmount *ump;
3133
3134	/*
3135	 * caller1 marks the last segment written before we issued the
3136	 * synchronize cache.
3137	 */
3138	jseg = bp->bio_caller1;
3139	if (jseg == NULL) {
3140		g_destroy_bio(bp);
3141		return;
3142	}
3143	ump = VFSTOUFS(jseg->js_list.wk_mp);
3144	ACQUIRE_LOCK(ump);
3145	oldest = NULL;
3146	/*
3147	 * Mark all the journal entries waiting on the synchronize cache
3148	 * as completed so they may continue on.
3149	 */
3150	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3151		jseg->js_state |= COMPLETE;
3152		oldest = jseg;
3153		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3154	}
3155	/*
3156	 * Restart deferred journal entry processing from the oldest
3157	 * completed jseg.
3158	 */
3159	if (oldest)
3160		complete_jsegs(oldest);
3161
3162	FREE_LOCK(ump);
3163	g_destroy_bio(bp);
3164}
3165
3166/*
3167 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3168 * barriers. The journal must be written prior to any blocks that depend
3169 * on it and the journal cannot be released until the blocks have been
3170 * written. This code handles both barriers simultaneously.
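 *
 * Schematically (a sketch of the intended ordering, not extra code):
 *
 *	journal segment write -> BIO_FLUSH (BIO_ORDERED) ->
 *	    dependent metadata writes -> journal space reclaimed
 *
 * so a crash should never expose metadata whose journal record is not
 * yet stable, nor a recycled journal block with unwritten dependents.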
3171 */ 3172static void 3173softdep_synchronize(bp, ump, caller1) 3174 struct bio *bp; 3175 struct ufsmount *ump; 3176 void *caller1; 3177{ 3178 3179 bp->bio_cmd = BIO_FLUSH; 3180 bp->bio_flags |= BIO_ORDERED; 3181 bp->bio_data = NULL; 3182 bp->bio_offset = ump->um_cp->provider->mediasize; 3183 bp->bio_length = 0; 3184 bp->bio_done = softdep_synchronize_completed; 3185 bp->bio_caller1 = caller1; 3186 g_io_request(bp, 3187 (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private); 3188} 3189 3190/* 3191 * Flush some journal records to disk. 3192 */ 3193static void 3194softdep_process_journal(mp, needwk, flags) 3195 struct mount *mp; 3196 struct worklist *needwk; 3197 int flags; 3198{ 3199 struct jblocks *jblocks; 3200 struct ufsmount *ump; 3201 struct worklist *wk; 3202 struct jseg *jseg; 3203 struct buf *bp; 3204 struct bio *bio; 3205 uint8_t *data; 3206 struct fs *fs; 3207 int shouldflush; 3208 int segwritten; 3209 int jrecmin; /* Minimum records per block. */ 3210 int jrecmax; /* Maximum records per block. */ 3211 int size; 3212 int cnt; 3213 int off; 3214 int devbsize; 3215 3216 if (MOUNTEDSUJ(mp) == 0) 3217 return; 3218 shouldflush = softdep_flushcache; 3219 bio = NULL; 3220 jseg = NULL; 3221 ump = VFSTOUFS(mp); 3222 LOCK_OWNED(ump); 3223 fs = ump->um_fs; 3224 jblocks = ump->softdep_jblocks; 3225 devbsize = ump->um_devvp->v_bufobj.bo_bsize; 3226 /* 3227 * We write anywhere between a disk block and fs block. The upper 3228 * bound is picked to prevent buffer cache fragmentation and limit 3229 * processing time per I/O. 3230 */ 3231 jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */ 3232 jrecmax = (fs->fs_bsize / devbsize) * jrecmin; 3233 segwritten = 0; 3234 for (;;) { 3235 cnt = ump->softdep_on_journal; 3236 /* 3237 * Criteria for writing a segment: 3238 * 1) We have a full block. 3239 * 2) We're called from jwait() and haven't found the 3240 * journal item yet. 3241 * 3) Always write if needseg is set. 3242 * 4) If we are called from process_worklist and have 3243 * not yet written anything we write a partial block 3244 * to enforce a 1 second maximum latency on journal 3245 * entries. 3246 */ 3247 if (cnt < (jrecmax - 1) && needwk == NULL && 3248 jblocks->jb_needseg == 0 && (segwritten || cnt == 0)) 3249 break; 3250 cnt++; 3251 /* 3252 * Verify some free journal space. softdep_prealloc() should 3253 * guarantee that we don't run out so this is indicative of 3254 * a problem with the flow control. Try to recover 3255 * gracefully in any event. 3256 */ 3257 while (jblocks->jb_free == 0) { 3258 if (flags != MNT_WAIT) 3259 break; 3260 printf("softdep: Out of journal space!\n"); 3261 softdep_speedup(); 3262 msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz); 3263 } 3264 FREE_LOCK(ump); 3265 jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS); 3266 workitem_alloc(&jseg->js_list, D_JSEG, mp); 3267 LIST_INIT(&jseg->js_entries); 3268 LIST_INIT(&jseg->js_indirs); 3269 jseg->js_state = ATTACHED; 3270 if (shouldflush == 0) 3271 jseg->js_state |= COMPLETE; 3272 else if (bio == NULL) 3273 bio = g_alloc_bio(); 3274 jseg->js_jblocks = jblocks; 3275 bp = geteblk(fs->fs_bsize, 0); 3276 ACQUIRE_LOCK(ump); 3277 /* 3278 * If there was a race while we were allocating the block 3279 * and jseg the entry we care about was likely written. 3280 * We bail out in both the WAIT and NOWAIT case and assume 3281 * the caller will loop if the entry it cares about is 3282 * not written. 
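 * This is the same drop-lock/revalidate discipline used by the hash
 * lookup routines earlier in this file: any state observed before
 * FREE_LOCK() may be stale, so softdep_on_journal and jb_free are
 * re-read below before committing to the segment.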
3283		 */
3284		cnt = ump->softdep_on_journal;
3285		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3286			bp->b_flags |= B_INVAL | B_NOCACHE;
3287			WORKITEM_FREE(jseg, D_JSEG);
3288			FREE_LOCK(ump);
3289			brelse(bp);
3290			ACQUIRE_LOCK(ump);
3291			break;
3292		}
3293		/*
3294		 * Calculate the disk block size required for the available
3295		 * records rounded to the min size.
3296		 */
3297		if (cnt == 0)
3298			size = devbsize;
3299		else if (cnt < jrecmax)
3300			size = howmany(cnt, jrecmin) * devbsize;
3301		else
3302			size = fs->fs_bsize;
3303		/*
3304		 * Allocate a disk block for this journal data and account
3305		 * for truncation of the requested size if enough contiguous
3306		 * space was not available.
3307		 */
3308		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3309		bp->b_lblkno = bp->b_blkno;
3310		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3311		bp->b_bcount = size;
3312		bp->b_flags &= ~B_INVAL;
3313		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3314		/*
3315		 * Initialize our jseg with cnt records. Assign the next
3316		 * sequence number to it and link it in-order.
3317		 */
3318		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3319		jseg->js_buf = bp;
3320		jseg->js_cnt = cnt;
3321		jseg->js_refs = cnt + 1;	/* Self ref. */
3322		jseg->js_size = size;
3323		jseg->js_seq = jblocks->jb_nextseq++;
3324		if (jblocks->jb_oldestseg == NULL)
3325			jblocks->jb_oldestseg = jseg;
3326		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3327		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3328		if (jblocks->jb_writeseg == NULL)
3329			jblocks->jb_writeseg = jseg;
3330		/*
3331		 * Start filling in records from the pending list.
3332		 */
3333		data = bp->b_data;
3334		off = 0;
3335
3336		/*
3337		 * Always put a header on the first block.
3338		 * XXX As with below, there might not be a chance to get
3339		 * into the loop. Ensure that something valid is written.
3340		 */
3341		jseg_write(ump, jseg, data);
3342		off += JREC_SIZE;
3343		data = bp->b_data + off;
3344
3345		/*
3346		 * XXX Something is wrong here. There's no work to do,
3347		 * but we need to perform an I/O and allow it to complete
3348		 * anyway.
3349		 */
3350		if (LIST_EMPTY(&ump->softdep_journal_pending))
3351			stat_emptyjblocks++;
3352
3353		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3354		    != NULL) {
3355			if (cnt == 0)
3356				break;
3357			/* Place a segment header on every device block.
*/ 3358 if ((off % devbsize) == 0) { 3359 jseg_write(ump, jseg, data); 3360 off += JREC_SIZE; 3361 data = bp->b_data + off; 3362 } 3363 if (wk == needwk) 3364 needwk = NULL; 3365 remove_from_journal(wk); 3366 wk->wk_state |= INPROGRESS; 3367 WORKLIST_INSERT(&jseg->js_entries, wk); 3368 switch (wk->wk_type) { 3369 case D_JADDREF: 3370 jaddref_write(WK_JADDREF(wk), jseg, data); 3371 break; 3372 case D_JREMREF: 3373 jremref_write(WK_JREMREF(wk), jseg, data); 3374 break; 3375 case D_JMVREF: 3376 jmvref_write(WK_JMVREF(wk), jseg, data); 3377 break; 3378 case D_JNEWBLK: 3379 jnewblk_write(WK_JNEWBLK(wk), jseg, data); 3380 break; 3381 case D_JFREEBLK: 3382 jfreeblk_write(WK_JFREEBLK(wk), jseg, data); 3383 break; 3384 case D_JFREEFRAG: 3385 jfreefrag_write(WK_JFREEFRAG(wk), jseg, data); 3386 break; 3387 case D_JTRUNC: 3388 jtrunc_write(WK_JTRUNC(wk), jseg, data); 3389 break; 3390 case D_JFSYNC: 3391 jfsync_write(WK_JFSYNC(wk), jseg, data); 3392 break; 3393 default: 3394 panic("process_journal: Unknown type %s", 3395 TYPENAME(wk->wk_type)); 3396 /* NOTREACHED */ 3397 } 3398 off += JREC_SIZE; 3399 data = bp->b_data + off; 3400 cnt--; 3401 } 3402 3403 /* Clear any remaining space so we don't leak kernel data */ 3404 if (size > off) 3405 bzero(data, size - off); 3406 3407 /* 3408 * Write this one buffer and continue. 3409 */ 3410 segwritten = 1; 3411 jblocks->jb_needseg = 0; 3412 WORKLIST_INSERT(&bp->b_dep, &jseg->js_list); 3413 FREE_LOCK(ump); 3414 pbgetvp(ump->um_devvp, bp); 3415 /* 3416 * We only do the blocking wait once we find the journal 3417 * entry we're looking for. 3418 */ 3419 if (needwk == NULL && flags == MNT_WAIT) 3420 bwrite(bp); 3421 else 3422 bawrite(bp); 3423 ACQUIRE_LOCK(ump); 3424 } 3425 /* 3426 * If we wrote a segment issue a synchronize cache so the journal 3427 * is reflected on disk before the data is written. Since reclaiming 3428 * journal space also requires writing a journal record this 3429 * process also enforces a barrier before reclamation. 3430 */ 3431 if (segwritten && shouldflush) { 3432 softdep_synchronize(bio, ump, 3433 TAILQ_LAST(&jblocks->jb_segs, jseglst)); 3434 } else if (bio) 3435 g_destroy_bio(bio); 3436 /* 3437 * If we've suspended the filesystem because we ran out of journal 3438 * space either try to sync it here to make some progress or 3439 * unsuspend it if we already have. 3440 */ 3441 if (flags == 0 && jblocks->jb_suspended) { 3442 if (journal_unsuspend(ump)) 3443 return; 3444 FREE_LOCK(ump); 3445 VFS_SYNC(mp, MNT_NOWAIT); 3446 ffs_sbupdate(ump, MNT_WAIT, 0); 3447 ACQUIRE_LOCK(ump); 3448 } 3449} 3450 3451/* 3452 * Complete a jseg, allowing all dependencies awaiting journal writes 3453 * to proceed. Each journal dependency also attaches a jsegdep to dependent 3454 * structures so that the journal segment can be freed to reclaim space. 
3455 */ 3456static void 3457complete_jseg(jseg) 3458 struct jseg *jseg; 3459{ 3460 struct worklist *wk; 3461 struct jmvref *jmvref; 3462 int waiting; 3463#ifdef INVARIANTS 3464 int i = 0; 3465#endif 3466 3467 while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) { 3468 WORKLIST_REMOVE(wk); 3469 waiting = wk->wk_state & IOWAITING; 3470 wk->wk_state &= ~(INPROGRESS | IOWAITING); 3471 wk->wk_state |= COMPLETE; 3472 KASSERT(i++ < jseg->js_cnt, 3473 ("handle_written_jseg: overflow %d >= %d", 3474 i - 1, jseg->js_cnt)); 3475 switch (wk->wk_type) { 3476 case D_JADDREF: 3477 handle_written_jaddref(WK_JADDREF(wk)); 3478 break; 3479 case D_JREMREF: 3480 handle_written_jremref(WK_JREMREF(wk)); 3481 break; 3482 case D_JMVREF: 3483 rele_jseg(jseg); /* No jsegdep. */ 3484 jmvref = WK_JMVREF(wk); 3485 LIST_REMOVE(jmvref, jm_deps); 3486 if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0) 3487 free_pagedep(jmvref->jm_pagedep); 3488 WORKITEM_FREE(jmvref, D_JMVREF); 3489 break; 3490 case D_JNEWBLK: 3491 handle_written_jnewblk(WK_JNEWBLK(wk)); 3492 break; 3493 case D_JFREEBLK: 3494 handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep); 3495 break; 3496 case D_JTRUNC: 3497 handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep); 3498 break; 3499 case D_JFSYNC: 3500 rele_jseg(jseg); /* No jsegdep. */ 3501 WORKITEM_FREE(wk, D_JFSYNC); 3502 break; 3503 case D_JFREEFRAG: 3504 handle_written_jfreefrag(WK_JFREEFRAG(wk)); 3505 break; 3506 default: 3507 panic("handle_written_jseg: Unknown type %s", 3508 TYPENAME(wk->wk_type)); 3509 /* NOTREACHED */ 3510 } 3511 if (waiting) 3512 wakeup(wk); 3513 } 3514 /* Release the self reference so the structure may be freed. */ 3515 rele_jseg(jseg); 3516} 3517 3518/* 3519 * Determine which jsegs are ready for completion processing. Waits for 3520 * synchronize cache to complete as well as forcing in-order completion 3521 * of journal entries. 3522 */ 3523static void 3524complete_jsegs(jseg) 3525 struct jseg *jseg; 3526{ 3527 struct jblocks *jblocks; 3528 struct jseg *jsegn; 3529 3530 jblocks = jseg->js_jblocks; 3531 /* 3532 * Don't allow out of order completions. If this isn't the first 3533 * block wait for it to write before we're done. 3534 */ 3535 if (jseg != jblocks->jb_writeseg) 3536 return; 3537 /* Iterate through available jsegs processing their entries. */ 3538 while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) { 3539 jblocks->jb_oldestwrseq = jseg->js_oldseq; 3540 jsegn = TAILQ_NEXT(jseg, js_next); 3541 complete_jseg(jseg); 3542 jseg = jsegn; 3543 } 3544 jblocks->jb_writeseg = jseg; 3545 /* 3546 * Attempt to free jsegs now that oldestwrseq may have advanced. 3547 */ 3548 free_jsegs(jblocks); 3549} 3550 3551/* 3552 * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle 3553 * the final completions. 3554 */ 3555static void 3556handle_written_jseg(jseg, bp) 3557 struct jseg *jseg; 3558 struct buf *bp; 3559{ 3560 3561 if (jseg->js_refs == 0) 3562 panic("handle_written_jseg: No self-reference on %p", jseg); 3563 jseg->js_state |= DEPCOMPLETE; 3564 /* 3565 * We'll never need this buffer again, set flags so it will be 3566 * discarded. 3567 */ 3568 bp->b_flags |= B_INVAL | B_NOCACHE; 3569 pbrelvp(bp); 3570 complete_jsegs(jseg); 3571} 3572 3573static inline struct jsegdep * 3574inoref_jseg(inoref) 3575 struct inoref *inoref; 3576{ 3577 struct jsegdep *jsegdep; 3578 3579 jsegdep = inoref->if_jsegdep; 3580 inoref->if_jsegdep = NULL; 3581 3582 return (jsegdep); 3583} 3584 3585/* 3586 * Called once a jremref has made it to stable store. 
The jremref is marked 3587 * complete and we attempt to free it. Any pagedeps writes sleeping waiting 3588 * for the jremref to complete will be awoken by free_jremref. 3589 */ 3590static void 3591handle_written_jremref(jremref) 3592 struct jremref *jremref; 3593{ 3594 struct inodedep *inodedep; 3595 struct jsegdep *jsegdep; 3596 struct dirrem *dirrem; 3597 3598 /* Grab the jsegdep. */ 3599 jsegdep = inoref_jseg(&jremref->jr_ref); 3600 /* 3601 * Remove us from the inoref list. 3602 */ 3603 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 3604 0, &inodedep) == 0) 3605 panic("handle_written_jremref: Lost inodedep"); 3606 TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 3607 /* 3608 * Complete the dirrem. 3609 */ 3610 dirrem = jremref->jr_dirrem; 3611 jremref->jr_dirrem = NULL; 3612 LIST_REMOVE(jremref, jr_deps); 3613 jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT; 3614 jwork_insert(&dirrem->dm_jwork, jsegdep); 3615 if (LIST_EMPTY(&dirrem->dm_jremrefhd) && 3616 (dirrem->dm_state & COMPLETE) != 0) 3617 add_to_worklist(&dirrem->dm_list, 0); 3618 free_jremref(jremref); 3619} 3620 3621/* 3622 * Called once a jaddref has made it to stable store. The dependency is 3623 * marked complete and any dependent structures are added to the inode 3624 * bufwait list to be completed as soon as it is written. If a bitmap write 3625 * depends on this entry we move the inode into the inodedephd of the 3626 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap. 3627 */ 3628static void 3629handle_written_jaddref(jaddref) 3630 struct jaddref *jaddref; 3631{ 3632 struct jsegdep *jsegdep; 3633 struct inodedep *inodedep; 3634 struct diradd *diradd; 3635 struct mkdir *mkdir; 3636 3637 /* Grab the jsegdep. */ 3638 jsegdep = inoref_jseg(&jaddref->ja_ref); 3639 mkdir = NULL; 3640 diradd = NULL; 3641 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 3642 0, &inodedep) == 0) 3643 panic("handle_written_jaddref: Lost inodedep."); 3644 if (jaddref->ja_diradd == NULL) 3645 panic("handle_written_jaddref: No dependency"); 3646 if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) { 3647 diradd = jaddref->ja_diradd; 3648 WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list); 3649 } else if (jaddref->ja_state & MKDIR_PARENT) { 3650 mkdir = jaddref->ja_mkdir; 3651 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list); 3652 } else if (jaddref->ja_state & MKDIR_BODY) 3653 mkdir = jaddref->ja_mkdir; 3654 else 3655 panic("handle_written_jaddref: Unknown dependency %p", 3656 jaddref->ja_diradd); 3657 jaddref->ja_diradd = NULL; /* also clears ja_mkdir */ 3658 /* 3659 * Remove us from the inode list. 3660 */ 3661 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps); 3662 /* 3663 * The mkdir may be waiting on the jaddref to clear before freeing. 3664 */ 3665 if (mkdir) { 3666 KASSERT(mkdir->md_list.wk_type == D_MKDIR, 3667 ("handle_written_jaddref: Incorrect type for mkdir %s", 3668 TYPENAME(mkdir->md_list.wk_type))); 3669 mkdir->md_jaddref = NULL; 3670 diradd = mkdir->md_diradd; 3671 mkdir->md_state |= DEPCOMPLETE; 3672 complete_mkdir(mkdir); 3673 } 3674 jwork_insert(&diradd->da_jwork, jsegdep); 3675 if (jaddref->ja_state & NEWBLOCK) { 3676 inodedep->id_state |= ONDEPLIST; 3677 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd, 3678 inodedep, id_deps); 3679 } 3680 free_jaddref(jaddref); 3681} 3682 3683/* 3684 * Called once a jnewblk journal is written. 
The allocdirect or allocindir 3685 * is placed in the bmsafemap to await notification of a written bitmap. If 3686 * the operation was canceled we add the segdep to the appropriate 3687 * dependency to free the journal space once the canceling operation 3688 * completes. 3689 */ 3690static void 3691handle_written_jnewblk(jnewblk) 3692 struct jnewblk *jnewblk; 3693{ 3694 struct bmsafemap *bmsafemap; 3695 struct freefrag *freefrag; 3696 struct freework *freework; 3697 struct jsegdep *jsegdep; 3698 struct newblk *newblk; 3699 3700 /* Grab the jsegdep. */ 3701 jsegdep = jnewblk->jn_jsegdep; 3702 jnewblk->jn_jsegdep = NULL; 3703 if (jnewblk->jn_dep == NULL) 3704 panic("handle_written_jnewblk: No dependency for the segdep."); 3705 switch (jnewblk->jn_dep->wk_type) { 3706 case D_NEWBLK: 3707 case D_ALLOCDIRECT: 3708 case D_ALLOCINDIR: 3709 /* 3710 * Add the written block to the bmsafemap so it can 3711 * be notified when the bitmap is on disk. 3712 */ 3713 newblk = WK_NEWBLK(jnewblk->jn_dep); 3714 newblk->nb_jnewblk = NULL; 3715 if ((newblk->nb_state & GOINGAWAY) == 0) { 3716 bmsafemap = newblk->nb_bmsafemap; 3717 newblk->nb_state |= ONDEPLIST; 3718 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, 3719 nb_deps); 3720 } 3721 jwork_insert(&newblk->nb_jwork, jsegdep); 3722 break; 3723 case D_FREEFRAG: 3724 /* 3725 * A newblock being removed by a freefrag when replaced by 3726 * frag extension. 3727 */ 3728 freefrag = WK_FREEFRAG(jnewblk->jn_dep); 3729 freefrag->ff_jdep = NULL; 3730 jwork_insert(&freefrag->ff_jwork, jsegdep); 3731 break; 3732 case D_FREEWORK: 3733 /* 3734 * A direct block was removed by truncate. 3735 */ 3736 freework = WK_FREEWORK(jnewblk->jn_dep); 3737 freework->fw_jnewblk = NULL; 3738 jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep); 3739 break; 3740 default: 3741 panic("handle_written_jnewblk: Unknown type %d.", 3742 jnewblk->jn_dep->wk_type); 3743 } 3744 jnewblk->jn_dep = NULL; 3745 free_jnewblk(jnewblk); 3746} 3747 3748/* 3749 * Cancel a jfreefrag that won't be needed, probably due to colliding with 3750 * an in-flight allocation that has not yet been committed. Divorce us 3751 * from the freefrag and mark it DEPCOMPLETE so that it may be added 3752 * to the worklist. 3753 */ 3754static void 3755cancel_jfreefrag(jfreefrag) 3756 struct jfreefrag *jfreefrag; 3757{ 3758 struct freefrag *freefrag; 3759 3760 if (jfreefrag->fr_jsegdep) { 3761 free_jsegdep(jfreefrag->fr_jsegdep); 3762 jfreefrag->fr_jsegdep = NULL; 3763 } 3764 freefrag = jfreefrag->fr_freefrag; 3765 jfreefrag->fr_freefrag = NULL; 3766 free_jfreefrag(jfreefrag); 3767 freefrag->ff_state |= DEPCOMPLETE; 3768 CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno); 3769} 3770 3771/* 3772 * Free a jfreefrag when the parent freefrag is rendered obsolete. 3773 */ 3774static void 3775free_jfreefrag(jfreefrag) 3776 struct jfreefrag *jfreefrag; 3777{ 3778 3779 if (jfreefrag->fr_state & INPROGRESS) 3780 WORKLIST_REMOVE(&jfreefrag->fr_list); 3781 else if (jfreefrag->fr_state & ONWORKLIST) 3782 remove_from_journal(&jfreefrag->fr_list); 3783 if (jfreefrag->fr_freefrag != NULL) 3784 panic("free_jfreefrag: Still attached to a freefrag."); 3785 WORKITEM_FREE(jfreefrag, D_JFREEFRAG); 3786} 3787 3788/* 3789 * Called when the journal write for a jfreefrag completes. The parent 3790 * freefrag is added to the worklist if this completes its dependencies. 
3791 */ 3792static void 3793handle_written_jfreefrag(jfreefrag) 3794 struct jfreefrag *jfreefrag; 3795{ 3796 struct jsegdep *jsegdep; 3797 struct freefrag *freefrag; 3798 3799 /* Grab the jsegdep. */ 3800 jsegdep = jfreefrag->fr_jsegdep; 3801 jfreefrag->fr_jsegdep = NULL; 3802 freefrag = jfreefrag->fr_freefrag; 3803 if (freefrag == NULL) 3804 panic("handle_written_jfreefrag: No freefrag."); 3805 freefrag->ff_state |= DEPCOMPLETE; 3806 freefrag->ff_jdep = NULL; 3807 jwork_insert(&freefrag->ff_jwork, jsegdep); 3808 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 3809 add_to_worklist(&freefrag->ff_list, 0); 3810 jfreefrag->fr_freefrag = NULL; 3811 free_jfreefrag(jfreefrag); 3812} 3813 3814/* 3815 * Called when the journal write for a jfreeblk completes. The jfreeblk 3816 * is removed from the freeblks list of pending journal writes and the 3817 * jsegdep is moved to the freeblks jwork to be completed when all blocks 3818 * have been reclaimed. 3819 */ 3820static void 3821handle_written_jblkdep(jblkdep) 3822 struct jblkdep *jblkdep; 3823{ 3824 struct freeblks *freeblks; 3825 struct jsegdep *jsegdep; 3826 3827 /* Grab the jsegdep. */ 3828 jsegdep = jblkdep->jb_jsegdep; 3829 jblkdep->jb_jsegdep = NULL; 3830 freeblks = jblkdep->jb_freeblks; 3831 LIST_REMOVE(jblkdep, jb_deps); 3832 jwork_insert(&freeblks->fb_jwork, jsegdep); 3833 /* 3834 * If the freeblks is all journaled, we can add it to the worklist. 3835 */ 3836 if (LIST_EMPTY(&freeblks->fb_jblkdephd) && 3837 (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE) 3838 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 3839 3840 free_jblkdep(jblkdep); 3841} 3842 3843static struct jsegdep * 3844newjsegdep(struct worklist *wk) 3845{ 3846 struct jsegdep *jsegdep; 3847 3848 jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS); 3849 workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp); 3850 jsegdep->jd_seg = NULL; 3851 3852 return (jsegdep); 3853} 3854 3855static struct jmvref * 3856newjmvref(dp, ino, oldoff, newoff) 3857 struct inode *dp; 3858 ino_t ino; 3859 off_t oldoff; 3860 off_t newoff; 3861{ 3862 struct jmvref *jmvref; 3863 3864 jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS); 3865 workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump)); 3866 jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE; 3867 jmvref->jm_parent = dp->i_number; 3868 jmvref->jm_ino = ino; 3869 jmvref->jm_oldoff = oldoff; 3870 jmvref->jm_newoff = newoff; 3871 3872 return (jmvref); 3873} 3874 3875/* 3876 * Allocate a new jremref that tracks the removal of ip from dp with the 3877 * directory entry offset of diroff. Mark the entry as ATTACHED and 3878 * DEPCOMPLETE as we have all the information required for the journal write 3879 * and the directory has already been removed from the buffer. The caller 3880 * is responsible for linking the jremref into the pagedep and adding it 3881 * to the journal to write. The MKDIR_PARENT flag is set if we're doing 3882 * a DOTDOT addition so handle_workitem_remove() can properly assign 3883 * the jsegdep when we're done. 
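 *
 * For example, removing or re-parenting a directory must also drop the
 * reference its ".." entry holds on the old parent; that extra jremref
 * is the DOTDOT case flagged with MKDIR_PARENT here (example given for
 * illustration; the callers appear elsewhere in this file).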
3884 */
3885static struct jremref *
3886newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
3887 off_t diroff, nlink_t nlink)
3888{
3889 struct jremref *jremref;
3890
3891 jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
3892 workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
3893 jremref->jr_state = ATTACHED;
3894 newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
3895 nlink, ip->i_mode);
3896 jremref->jr_dirrem = dirrem;
3897
3898 return (jremref);
3899}
3900
3901static inline void
3902newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
3903 nlink_t nlink, uint16_t mode)
3904{
3905
3906 inoref->if_jsegdep = newjsegdep(&inoref->if_list);
3907 inoref->if_diroff = diroff;
3908 inoref->if_ino = ino;
3909 inoref->if_parent = parent;
3910 inoref->if_nlink = nlink;
3911 inoref->if_mode = mode;
3912}
3913
3914/*
3915 * Allocate a new jaddref to track the addition of ino to dp at diroff. The
3916 * directory offset may not be known until later. The caller is responsible
3917 * for adding the entry to the journal when this information is available.
3918 * nlink should be the link count prior to the addition and mode is only
3919 * required to have the correct FMT.
3920 */
3921static struct jaddref *
3922newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
3923 uint16_t mode)
3924{
3925 struct jaddref *jaddref;
3926
3927 jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
3928 workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
3929 jaddref->ja_state = ATTACHED;
3930 jaddref->ja_mkdir = NULL;
3931 newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
3932
3933 return (jaddref);
3934}
3935
3936/*
3937 * Create a new free dependency for a freework. The caller is responsible
3938 * for adjusting the reference count when it has the lock held. The freedep
3939 * will track an outstanding bitmap write that will ultimately clear the
3940 * freework to continue.
3941 */
3942static struct freedep *
3943newfreedep(struct freework *freework)
3944{
3945 struct freedep *freedep;
3946
3947 freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
3948 workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
3949 freedep->fd_freework = freework;
3950
3951 return (freedep);
3952}
3953
3954/*
3955 * Free a freedep structure once the buffer it is linked to is written. If
3956 * this is the last reference to the freework, schedule it for completion.
3957 */
3958static void
3959free_freedep(freedep)
3960 struct freedep *freedep;
3961{
3962 struct freework *freework;
3963
3964 freework = freedep->fd_freework;
3965 freework->fw_freeblks->fb_cgwait--;
3966 if (--freework->fw_ref == 0)
3967 freework_enqueue(freework);
3968 WORKITEM_FREE(freedep, D_FREEDEP);
3969}
3970
3971/*
3972 * Allocate a new freework structure that may be a level in an indirect
3973 * block when parent is not NULL, or a top level block when parent is NULL.
3974 * The top level freework structures are allocated without the soft updates
3975 * lock held and before the freeblks is visible outside of softdep_setup_freeblocks().
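 *
 * One reading of the initialization below: on SUJ mounts a freework for
 * an indirect block starts with NINDIR(fs) + 1 references, plausibly one
 * per pointer slot in the indirect plus one for the level itself, so the
 * level is not torn down until every child has been processed; all other
 * blocks start at zero.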
3976 */ 3977static struct freework * 3978newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal) 3979 struct ufsmount *ump; 3980 struct freeblks *freeblks; 3981 struct freework *parent; 3982 ufs_lbn_t lbn; 3983 ufs2_daddr_t nb; 3984 int frags; 3985 int off; 3986 int journal; 3987{ 3988 struct freework *freework; 3989 3990 freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS); 3991 workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp); 3992 freework->fw_state = ATTACHED; 3993 freework->fw_jnewblk = NULL; 3994 freework->fw_freeblks = freeblks; 3995 freework->fw_parent = parent; 3996 freework->fw_lbn = lbn; 3997 freework->fw_blkno = nb; 3998 freework->fw_frags = frags; 3999 freework->fw_indir = NULL; 4000 freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR) 4001 ? 0 : NINDIR(ump->um_fs) + 1; 4002 freework->fw_start = freework->fw_off = off; 4003 if (journal) 4004 newjfreeblk(freeblks, lbn, nb, frags); 4005 if (parent == NULL) { 4006 ACQUIRE_LOCK(ump); 4007 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 4008 freeblks->fb_ref++; 4009 FREE_LOCK(ump); 4010 } 4011 4012 return (freework); 4013} 4014 4015/* 4016 * Eliminate a jfreeblk for a block that does not need journaling. 4017 */ 4018static void 4019cancel_jfreeblk(freeblks, blkno) 4020 struct freeblks *freeblks; 4021 ufs2_daddr_t blkno; 4022{ 4023 struct jfreeblk *jfreeblk; 4024 struct jblkdep *jblkdep; 4025 4026 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) { 4027 if (jblkdep->jb_list.wk_type != D_JFREEBLK) 4028 continue; 4029 jfreeblk = WK_JFREEBLK(&jblkdep->jb_list); 4030 if (jfreeblk->jf_blkno == blkno) 4031 break; 4032 } 4033 if (jblkdep == NULL) 4034 return; 4035 CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno); 4036 free_jsegdep(jblkdep->jb_jsegdep); 4037 LIST_REMOVE(jblkdep, jb_deps); 4038 WORKITEM_FREE(jfreeblk, D_JFREEBLK); 4039} 4040 4041/* 4042 * Allocate a new jfreeblk to journal top level block pointer when truncating 4043 * a file. The caller must add this to the worklist when the soft updates 4044 * lock is held. 4045 */ 4046static struct jfreeblk * 4047newjfreeblk(freeblks, lbn, blkno, frags) 4048 struct freeblks *freeblks; 4049 ufs_lbn_t lbn; 4050 ufs2_daddr_t blkno; 4051 int frags; 4052{ 4053 struct jfreeblk *jfreeblk; 4054 4055 jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS); 4056 workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK, 4057 freeblks->fb_list.wk_mp); 4058 jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list); 4059 jfreeblk->jf_dep.jb_freeblks = freeblks; 4060 jfreeblk->jf_ino = freeblks->fb_inum; 4061 jfreeblk->jf_lbn = lbn; 4062 jfreeblk->jf_blkno = blkno; 4063 jfreeblk->jf_frags = frags; 4064 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps); 4065 4066 return (jfreeblk); 4067} 4068 4069/* 4070 * The journal is only prepared to handle full-size block numbers, so we 4071 * have to adjust the record to reflect the change to a full-size block. 4072 * For example, suppose we have a block made up of fragments 8-15 and 4073 * want to free its last two fragments. We are given a request that says: 4074 * FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0 4075 * where frags are the number of fragments to free and oldfrags are the 4076 * number of fragments to keep. 
To block align it, we have to change it to 4077 * have a valid full-size blkno, so it becomes: 4078 * FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6 4079 */ 4080static void 4081adjust_newfreework(freeblks, frag_offset) 4082 struct freeblks *freeblks; 4083 int frag_offset; 4084{ 4085 struct jfreeblk *jfreeblk; 4086 4087 KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL && 4088 LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK), 4089 ("adjust_newfreework: Missing freeblks dependency")); 4090 4091 jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd)); 4092 jfreeblk->jf_blkno -= frag_offset; 4093 jfreeblk->jf_frags += frag_offset; 4094} 4095 4096/* 4097 * Allocate a new jtrunc to track a partial truncation. 4098 */ 4099static struct jtrunc * 4100newjtrunc(freeblks, size, extsize) 4101 struct freeblks *freeblks; 4102 off_t size; 4103 int extsize; 4104{ 4105 struct jtrunc *jtrunc; 4106 4107 jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS); 4108 workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC, 4109 freeblks->fb_list.wk_mp); 4110 jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list); 4111 jtrunc->jt_dep.jb_freeblks = freeblks; 4112 jtrunc->jt_ino = freeblks->fb_inum; 4113 jtrunc->jt_size = size; 4114 jtrunc->jt_extsize = extsize; 4115 LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps); 4116 4117 return (jtrunc); 4118} 4119 4120/* 4121 * If we're canceling a new bitmap we have to search for another ref 4122 * to move into the bmsafemap dep. This might be better expressed 4123 * with another structure. 4124 */ 4125static void 4126move_newblock_dep(jaddref, inodedep) 4127 struct jaddref *jaddref; 4128 struct inodedep *inodedep; 4129{ 4130 struct inoref *inoref; 4131 struct jaddref *jaddrefn; 4132 4133 jaddrefn = NULL; 4134 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4135 inoref = TAILQ_NEXT(inoref, if_deps)) { 4136 if ((jaddref->ja_state & NEWBLOCK) && 4137 inoref->if_list.wk_type == D_JADDREF) { 4138 jaddrefn = (struct jaddref *)inoref; 4139 break; 4140 } 4141 } 4142 if (jaddrefn == NULL) 4143 return; 4144 jaddrefn->ja_state &= ~(ATTACHED | UNDONE); 4145 jaddrefn->ja_state |= jaddref->ja_state & 4146 (ATTACHED | UNDONE | NEWBLOCK); 4147 jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK); 4148 jaddref->ja_state |= ATTACHED; 4149 LIST_REMOVE(jaddref, ja_bmdeps); 4150 LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn, 4151 ja_bmdeps); 4152} 4153 4154/* 4155 * Cancel a jaddref either before it has been written or while it is being 4156 * written. This happens when a link is removed before the add reaches 4157 * the disk. The jaddref dependency is kept linked into the bmsafemap 4158 * and inode to prevent the link count or bitmap from reaching the disk 4159 * until handle_workitem_remove() re-adjusts the counts and bitmaps as 4160 * required. 4161 * 4162 * Returns 1 if the canceled addref requires journaling of the remove and 4163 * 0 otherwise. 
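 *
 * For example, if a file is created and unlinked before the jaddref's
 * journal write has begun, the add and remove simply cancel in memory:
 * the if_nlink of any later inorefs is adjusted downward and no remove
 * record is needed (0 is returned). Once the write is INPROGRESS or
 * COMPLETE the remove must itself be journaled, so 1 is returned.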
4164 */ 4165static int 4166cancel_jaddref(jaddref, inodedep, wkhd) 4167 struct jaddref *jaddref; 4168 struct inodedep *inodedep; 4169 struct workhead *wkhd; 4170{ 4171 struct inoref *inoref; 4172 struct jsegdep *jsegdep; 4173 int needsj; 4174 4175 KASSERT((jaddref->ja_state & COMPLETE) == 0, 4176 ("cancel_jaddref: Canceling complete jaddref")); 4177 if (jaddref->ja_state & (INPROGRESS | COMPLETE)) 4178 needsj = 1; 4179 else 4180 needsj = 0; 4181 if (inodedep == NULL) 4182 if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino, 4183 0, &inodedep) == 0) 4184 panic("cancel_jaddref: Lost inodedep"); 4185 /* 4186 * We must adjust the nlink of any reference operation that follows 4187 * us so that it is consistent with the in-memory reference. This 4188 * ensures that inode nlink rollbacks always have the correct link. 4189 */ 4190 if (needsj == 0) { 4191 for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref; 4192 inoref = TAILQ_NEXT(inoref, if_deps)) { 4193 if (inoref->if_state & GOINGAWAY) 4194 break; 4195 inoref->if_nlink--; 4196 } 4197 } 4198 jsegdep = inoref_jseg(&jaddref->ja_ref); 4199 if (jaddref->ja_state & NEWBLOCK) 4200 move_newblock_dep(jaddref, inodedep); 4201 wake_worklist(&jaddref->ja_list); 4202 jaddref->ja_mkdir = NULL; 4203 if (jaddref->ja_state & INPROGRESS) { 4204 jaddref->ja_state &= ~INPROGRESS; 4205 WORKLIST_REMOVE(&jaddref->ja_list); 4206 jwork_insert(wkhd, jsegdep); 4207 } else { 4208 free_jsegdep(jsegdep); 4209 if (jaddref->ja_state & DEPCOMPLETE) 4210 remove_from_journal(&jaddref->ja_list); 4211 } 4212 jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE); 4213 /* 4214 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove 4215 * can arrange for them to be freed with the bitmap. Otherwise we 4216 * no longer need this addref attached to the inoreflst and it 4217 * will incorrectly adjust nlink if we leave it. 4218 */ 4219 if ((jaddref->ja_state & NEWBLOCK) == 0) { 4220 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 4221 if_deps); 4222 jaddref->ja_state |= COMPLETE; 4223 free_jaddref(jaddref); 4224 return (needsj); 4225 } 4226 /* 4227 * Leave the head of the list for jsegdeps for fast merging. 4228 */ 4229 if (LIST_FIRST(wkhd) != NULL) { 4230 jaddref->ja_state |= ONWORKLIST; 4231 LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list); 4232 } else 4233 WORKLIST_INSERT(wkhd, &jaddref->ja_list); 4234 4235 return (needsj); 4236} 4237 4238/* 4239 * Attempt to free a jaddref structure when some work completes. This 4240 * should only succeed once the entry is written and all dependencies have 4241 * been notified. 4242 */ 4243static void 4244free_jaddref(jaddref) 4245 struct jaddref *jaddref; 4246{ 4247 4248 if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE) 4249 return; 4250 if (jaddref->ja_ref.if_jsegdep) 4251 panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n", 4252 jaddref, jaddref->ja_state); 4253 if (jaddref->ja_state & NEWBLOCK) 4254 LIST_REMOVE(jaddref, ja_bmdeps); 4255 if (jaddref->ja_state & (INPROGRESS | ONWORKLIST)) 4256 panic("free_jaddref: Bad state %p(0x%X)", 4257 jaddref, jaddref->ja_state); 4258 if (jaddref->ja_mkdir != NULL) 4259 panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state); 4260 WORKITEM_FREE(jaddref, D_JADDREF); 4261} 4262 4263/* 4264 * Free a jremref structure once it has been written or discarded. 
4265 */
4266static void
4267free_jremref(jremref)
4268 struct jremref *jremref;
4269{
4270
4271 if (jremref->jr_ref.if_jsegdep)
4272 free_jsegdep(jremref->jr_ref.if_jsegdep);
4273 if (jremref->jr_state & INPROGRESS)
4274 panic("free_jremref: IO still pending");
4275 WORKITEM_FREE(jremref, D_JREMREF);
4276}
4277
4278/*
4279 * Free a jnewblk structure.
4280 */
4281static void
4282free_jnewblk(jnewblk)
4283 struct jnewblk *jnewblk;
4284{
4285
4286 if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4287 return;
4288 LIST_REMOVE(jnewblk, jn_deps);
4289 if (jnewblk->jn_dep != NULL)
4290 panic("free_jnewblk: Dependency still attached.");
4291 WORKITEM_FREE(jnewblk, D_JNEWBLK);
4292}
4293
4294/*
4295 * Cancel a jnewblk which has been made redundant by frag extension.
4296 */
4297static void
4298cancel_jnewblk(jnewblk, wkhd)
4299 struct jnewblk *jnewblk;
4300 struct workhead *wkhd;
4301{
4302 struct jsegdep *jsegdep;
4303
4304 CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4305 jsegdep = jnewblk->jn_jsegdep;
4306 if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4307 panic("cancel_jnewblk: Invalid state");
4308 jnewblk->jn_jsegdep = NULL;
4309 jnewblk->jn_dep = NULL;
4310 jnewblk->jn_state |= GOINGAWAY;
4311 if (jnewblk->jn_state & INPROGRESS) {
4312 jnewblk->jn_state &= ~INPROGRESS;
4313 WORKLIST_REMOVE(&jnewblk->jn_list);
4314 jwork_insert(wkhd, jsegdep);
4315 } else {
4316 free_jsegdep(jsegdep);
4317 remove_from_journal(&jnewblk->jn_list);
4318 }
4319 wake_worklist(&jnewblk->jn_list);
4320 WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4321}
4322
4323static void
4324free_jblkdep(jblkdep)
4325 struct jblkdep *jblkdep;
4326{
4327
4328 if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4329 WORKITEM_FREE(jblkdep, D_JFREEBLK);
4330 else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4331 WORKITEM_FREE(jblkdep, D_JTRUNC);
4332 else
4333 panic("free_jblkdep: Unexpected type %s",
4334 TYPENAME(jblkdep->jb_list.wk_type));
4335}
4336
4337/*
4338 * Free a single jseg once it is no longer referenced in memory or on
4339 * disk. Reclaim journal blocks and dependencies waiting for the segment
4340 * to disappear.
4341 */
4342static void
4343free_jseg(jseg, jblocks)
4344 struct jseg *jseg;
4345 struct jblocks *jblocks;
4346{
4347 struct freework *freework;
4348
4349 /*
4350 * Free freework structures that were lingering to indicate freed
4351 * indirect blocks that forced journal write ordering on reallocate.
4352 */
4353 while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4354 indirblk_remove(freework);
4355 if (jblocks->jb_oldestseg == jseg)
4356 jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4357 TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4358 jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4359 KASSERT(LIST_EMPTY(&jseg->js_entries),
4360 ("free_jseg: Freed jseg has valid entries."));
4361 WORKITEM_FREE(jseg, D_JSEG);
4362}
4363
4364/*
4365 * Free all jsegs that meet the criteria for being reclaimed and update
4366 * oldestseg.
4367 */
4368static void
4369free_jsegs(jblocks)
4370 struct jblocks *jblocks;
4371{
4372 struct jseg *jseg;
4373
4374 /*
4375 * Free only those jsegs which have none allocated before them to
4376 * preserve the journal space ordering.
4377 */
4378 while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4379 /*
4380 * Only reclaim space when nothing depends on this journal
4381 * set and another set has written that it is no longer
4382 * valid.
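 *
 * Concretely, the checks below reclaim a jseg only when it has no
 * remaining references, has reached ALLCOMPLETE, and its sequence
 * number has been superseded on disk (js_seq at or below
 * jb_oldestwrseq, the equal case being allowed only for segments
 * that wrote no entries).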
4383 */ 4384 if (jseg->js_refs != 0) { 4385 jblocks->jb_oldestseg = jseg; 4386 return; 4387 } 4388 if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE) 4389 break; 4390 if (jseg->js_seq > jblocks->jb_oldestwrseq) 4391 break; 4392 /* 4393 * We can free jsegs that didn't write entries when 4394 * oldestwrseq == js_seq. 4395 */ 4396 if (jseg->js_seq == jblocks->jb_oldestwrseq && 4397 jseg->js_cnt != 0) 4398 break; 4399 free_jseg(jseg, jblocks); 4400 } 4401 /* 4402 * If we exited the loop above we still must discover the 4403 * oldest valid segment. 4404 */ 4405 if (jseg) 4406 for (jseg = jblocks->jb_oldestseg; jseg != NULL; 4407 jseg = TAILQ_NEXT(jseg, js_next)) 4408 if (jseg->js_refs != 0) 4409 break; 4410 jblocks->jb_oldestseg = jseg; 4411 /* 4412 * The journal has no valid records but some jsegs may still be 4413 * waiting on oldestwrseq to advance. We force a small record 4414 * out to permit these lingering records to be reclaimed. 4415 */ 4416 if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs)) 4417 jblocks->jb_needseg = 1; 4418} 4419 4420/* 4421 * Release one reference to a jseg and free it if the count reaches 0. This 4422 * should eventually reclaim journal space as well. 4423 */ 4424static void 4425rele_jseg(jseg) 4426 struct jseg *jseg; 4427{ 4428 4429 KASSERT(jseg->js_refs > 0, 4430 ("free_jseg: Invalid refcnt %d", jseg->js_refs)); 4431 if (--jseg->js_refs != 0) 4432 return; 4433 free_jsegs(jseg->js_jblocks); 4434} 4435 4436/* 4437 * Release a jsegdep and decrement the jseg count. 4438 */ 4439static void 4440free_jsegdep(jsegdep) 4441 struct jsegdep *jsegdep; 4442{ 4443 4444 if (jsegdep->jd_seg) 4445 rele_jseg(jsegdep->jd_seg); 4446 WORKITEM_FREE(jsegdep, D_JSEGDEP); 4447} 4448 4449/* 4450 * Wait for a journal item to make it to disk. Initiate journal processing 4451 * if required. 4452 */ 4453static int 4454jwait(wk, waitfor) 4455 struct worklist *wk; 4456 int waitfor; 4457{ 4458 4459 LOCK_OWNED(VFSTOUFS(wk->wk_mp)); 4460 /* 4461 * Blocking journal waits cause slow synchronous behavior. Record 4462 * stats on the frequency of these blocking operations. 4463 */ 4464 if (waitfor == MNT_WAIT) { 4465 stat_journal_wait++; 4466 switch (wk->wk_type) { 4467 case D_JREMREF: 4468 case D_JMVREF: 4469 stat_jwait_filepage++; 4470 break; 4471 case D_JTRUNC: 4472 case D_JFREEBLK: 4473 stat_jwait_freeblks++; 4474 break; 4475 case D_JNEWBLK: 4476 stat_jwait_newblk++; 4477 break; 4478 case D_JADDREF: 4479 stat_jwait_inode++; 4480 break; 4481 default: 4482 break; 4483 } 4484 } 4485 /* 4486 * If IO has not started we process the journal. We can't mark the 4487 * worklist item as IOWAITING because we drop the lock while 4488 * processing the journal and the worklist entry may be freed after 4489 * this point. The caller may call back in and re-issue the request. 4490 */ 4491 if ((wk->wk_state & INPROGRESS) == 0) { 4492 softdep_process_journal(wk->wk_mp, wk, waitfor); 4493 if (waitfor != MNT_WAIT) 4494 return (EBUSY); 4495 return (0); 4496 } 4497 if (waitfor != MNT_WAIT) 4498 return (EBUSY); 4499 wait_worklist(wk, "jwait"); 4500 return (0); 4501} 4502 4503/* 4504 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as 4505 * appropriate. This is a convenience function to reduce duplicate code 4506 * for the setup and revert functions below. 
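 *
 * As a worked example of the delta computed below:
 *	ip->i_nlink == 2, ip->i_effnlink == 1
 *		=> inodedep->id_nlinkdelta == 1
 * recording how far the on-disk link count runs ahead of the in-memory
 * effective one.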
4507 */ 4508static struct inodedep * 4509inodedep_lookup_ip(ip) 4510 struct inode *ip; 4511{ 4512 struct inodedep *inodedep; 4513 int dflags; 4514 4515 KASSERT(ip->i_nlink >= ip->i_effnlink, 4516 ("inodedep_lookup_ip: bad delta")); 4517 dflags = DEPALLOC; 4518 if (IS_SNAPSHOT(ip)) 4519 dflags |= NODELAY; 4520 (void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, 4521 &inodedep); 4522 inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink; 4523 KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked")); 4524 4525 return (inodedep); 4526} 4527 4528/* 4529 * Called prior to creating a new inode and linking it to a directory. The 4530 * jaddref structure must already be allocated by softdep_setup_inomapdep 4531 * and it is discovered here so we can initialize the mode and update 4532 * nlinkdelta. 4533 */ 4534void 4535softdep_setup_create(dp, ip) 4536 struct inode *dp; 4537 struct inode *ip; 4538{ 4539 struct inodedep *inodedep; 4540 struct jaddref *jaddref; 4541 struct vnode *dvp; 4542 4543 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4544 ("softdep_setup_create called on non-softdep filesystem")); 4545 KASSERT(ip->i_nlink == 1, 4546 ("softdep_setup_create: Invalid link count.")); 4547 dvp = ITOV(dp); 4548 ACQUIRE_LOCK(dp->i_ump); 4549 inodedep = inodedep_lookup_ip(ip); 4550 if (DOINGSUJ(dvp)) { 4551 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4552 inoreflst); 4553 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 4554 ("softdep_setup_create: No addref structure present.")); 4555 } 4556 softdep_prelink(dvp, NULL); 4557 FREE_LOCK(dp->i_ump); 4558} 4559 4560/* 4561 * Create a jaddref structure to track the addition of a DOTDOT link when 4562 * we are reparenting an inode as part of a rename. This jaddref will be 4563 * found by softdep_setup_directory_change. Adjusts nlinkdelta for 4564 * non-journaling softdep. 4565 */ 4566void 4567softdep_setup_dotdot_link(dp, ip) 4568 struct inode *dp; 4569 struct inode *ip; 4570{ 4571 struct inodedep *inodedep; 4572 struct jaddref *jaddref; 4573 struct vnode *dvp; 4574 struct vnode *vp; 4575 4576 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4577 ("softdep_setup_dotdot_link called on non-softdep filesystem")); 4578 dvp = ITOV(dp); 4579 vp = ITOV(ip); 4580 jaddref = NULL; 4581 /* 4582 * We don't set MKDIR_PARENT as this is not tied to a mkdir and 4583 * is used as a normal link would be. 4584 */ 4585 if (DOINGSUJ(dvp)) 4586 jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4587 dp->i_effnlink - 1, dp->i_mode); 4588 ACQUIRE_LOCK(dp->i_ump); 4589 inodedep = inodedep_lookup_ip(dp); 4590 if (jaddref) 4591 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4592 if_deps); 4593 softdep_prelink(dvp, ITOV(ip)); 4594 FREE_LOCK(dp->i_ump); 4595} 4596 4597/* 4598 * Create a jaddref structure to track a new link to an inode. The directory 4599 * offset is not known until softdep_setup_directory_add or 4600 * softdep_setup_directory_change. Adjusts nlinkdelta for non-journaling 4601 * softdep. 
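 *
 * Note that the jaddref below is created with ip->i_effnlink - 1, the
 * link count prior to this addition, matching the convention documented
 * above newjaddref().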
4602 */ 4603void 4604softdep_setup_link(dp, ip) 4605 struct inode *dp; 4606 struct inode *ip; 4607{ 4608 struct inodedep *inodedep; 4609 struct jaddref *jaddref; 4610 struct vnode *dvp; 4611 4612 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4613 ("softdep_setup_link called on non-softdep filesystem")); 4614 dvp = ITOV(dp); 4615 jaddref = NULL; 4616 if (DOINGSUJ(dvp)) 4617 jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1, 4618 ip->i_mode); 4619 ACQUIRE_LOCK(dp->i_ump); 4620 inodedep = inodedep_lookup_ip(ip); 4621 if (jaddref) 4622 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref, 4623 if_deps); 4624 softdep_prelink(dvp, ITOV(ip)); 4625 FREE_LOCK(dp->i_ump); 4626} 4627 4628/* 4629 * Called to create the jaddref structures to track . and .. references as 4630 * well as lookup and further initialize the incomplete jaddref created 4631 * by softdep_setup_inomapdep when the inode was allocated. Adjusts 4632 * nlinkdelta for non-journaling softdep. 4633 */ 4634void 4635softdep_setup_mkdir(dp, ip) 4636 struct inode *dp; 4637 struct inode *ip; 4638{ 4639 struct inodedep *inodedep; 4640 struct jaddref *dotdotaddref; 4641 struct jaddref *dotaddref; 4642 struct jaddref *jaddref; 4643 struct vnode *dvp; 4644 4645 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4646 ("softdep_setup_mkdir called on non-softdep filesystem")); 4647 dvp = ITOV(dp); 4648 dotaddref = dotdotaddref = NULL; 4649 if (DOINGSUJ(dvp)) { 4650 dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1, 4651 ip->i_mode); 4652 dotaddref->ja_state |= MKDIR_BODY; 4653 dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET, 4654 dp->i_effnlink - 1, dp->i_mode); 4655 dotdotaddref->ja_state |= MKDIR_PARENT; 4656 } 4657 ACQUIRE_LOCK(dp->i_ump); 4658 inodedep = inodedep_lookup_ip(ip); 4659 if (DOINGSUJ(dvp)) { 4660 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4661 inoreflst); 4662 KASSERT(jaddref != NULL, 4663 ("softdep_setup_mkdir: No addref structure present.")); 4664 KASSERT(jaddref->ja_parent == dp->i_number, 4665 ("softdep_setup_mkdir: bad parent %ju", 4666 (uintmax_t)jaddref->ja_parent)); 4667 TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref, 4668 if_deps); 4669 } 4670 inodedep = inodedep_lookup_ip(dp); 4671 if (DOINGSUJ(dvp)) 4672 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, 4673 &dotdotaddref->ja_ref, if_deps); 4674 softdep_prelink(ITOV(dp), NULL); 4675 FREE_LOCK(dp->i_ump); 4676} 4677 4678/* 4679 * Called to track nlinkdelta of the inode and parent directories prior to 4680 * unlinking a directory. 4681 */ 4682void 4683softdep_setup_rmdir(dp, ip) 4684 struct inode *dp; 4685 struct inode *ip; 4686{ 4687 struct vnode *dvp; 4688 4689 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4690 ("softdep_setup_rmdir called on non-softdep filesystem")); 4691 dvp = ITOV(dp); 4692 ACQUIRE_LOCK(dp->i_ump); 4693 (void) inodedep_lookup_ip(ip); 4694 (void) inodedep_lookup_ip(dp); 4695 softdep_prelink(dvp, ITOV(ip)); 4696 FREE_LOCK(dp->i_ump); 4697} 4698 4699/* 4700 * Called to track nlinkdelta of the inode and parent directories prior to 4701 * unlink. 
4702 */ 4703void 4704softdep_setup_unlink(dp, ip) 4705 struct inode *dp; 4706 struct inode *ip; 4707{ 4708 struct vnode *dvp; 4709 4710 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4711 ("softdep_setup_unlink called on non-softdep filesystem")); 4712 dvp = ITOV(dp); 4713 ACQUIRE_LOCK(dp->i_ump); 4714 (void) inodedep_lookup_ip(ip); 4715 (void) inodedep_lookup_ip(dp); 4716 softdep_prelink(dvp, ITOV(ip)); 4717 FREE_LOCK(dp->i_ump); 4718} 4719 4720/* 4721 * Called to release the journal structures created by a failed non-directory 4722 * creation. Adjusts nlinkdelta for non-journaling softdep. 4723 */ 4724void 4725softdep_revert_create(dp, ip) 4726 struct inode *dp; 4727 struct inode *ip; 4728{ 4729 struct inodedep *inodedep; 4730 struct jaddref *jaddref; 4731 struct vnode *dvp; 4732 4733 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4734 ("softdep_revert_create called on non-softdep filesystem")); 4735 dvp = ITOV(dp); 4736 ACQUIRE_LOCK(dp->i_ump); 4737 inodedep = inodedep_lookup_ip(ip); 4738 if (DOINGSUJ(dvp)) { 4739 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4740 inoreflst); 4741 KASSERT(jaddref->ja_parent == dp->i_number, 4742 ("softdep_revert_create: addref parent mismatch")); 4743 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4744 } 4745 FREE_LOCK(dp->i_ump); 4746} 4747 4748/* 4749 * Called to release the journal structures created by a failed link 4750 * addition. Adjusts nlinkdelta for non-journaling softdep. 4751 */ 4752void 4753softdep_revert_link(dp, ip) 4754 struct inode *dp; 4755 struct inode *ip; 4756{ 4757 struct inodedep *inodedep; 4758 struct jaddref *jaddref; 4759 struct vnode *dvp; 4760 4761 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4762 ("softdep_revert_link called on non-softdep filesystem")); 4763 dvp = ITOV(dp); 4764 ACQUIRE_LOCK(dp->i_ump); 4765 inodedep = inodedep_lookup_ip(ip); 4766 if (DOINGSUJ(dvp)) { 4767 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4768 inoreflst); 4769 KASSERT(jaddref->ja_parent == dp->i_number, 4770 ("softdep_revert_link: addref parent mismatch")); 4771 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4772 } 4773 FREE_LOCK(dp->i_ump); 4774} 4775 4776/* 4777 * Called to release the journal structures created by a failed mkdir 4778 * attempt. Adjusts nlinkdelta for non-journaling softdep. 
4779 */ 4780void 4781softdep_revert_mkdir(dp, ip) 4782 struct inode *dp; 4783 struct inode *ip; 4784{ 4785 struct inodedep *inodedep; 4786 struct jaddref *jaddref; 4787 struct jaddref *dotaddref; 4788 struct vnode *dvp; 4789 4790 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4791 ("softdep_revert_mkdir called on non-softdep filesystem")); 4792 dvp = ITOV(dp); 4793 4794 ACQUIRE_LOCK(dp->i_ump); 4795 inodedep = inodedep_lookup_ip(dp); 4796 if (DOINGSUJ(dvp)) { 4797 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4798 inoreflst); 4799 KASSERT(jaddref->ja_parent == ip->i_number, 4800 ("softdep_revert_mkdir: dotdot addref parent mismatch")); 4801 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4802 } 4803 inodedep = inodedep_lookup_ip(ip); 4804 if (DOINGSUJ(dvp)) { 4805 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 4806 inoreflst); 4807 KASSERT(jaddref->ja_parent == dp->i_number, 4808 ("softdep_revert_mkdir: addref parent mismatch")); 4809 dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 4810 inoreflst, if_deps); 4811 cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait); 4812 KASSERT(dotaddref->ja_parent == ip->i_number, 4813 ("softdep_revert_mkdir: dot addref parent mismatch")); 4814 cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait); 4815 } 4816 FREE_LOCK(dp->i_ump); 4817} 4818 4819/* 4820 * Called to correct nlinkdelta after a failed rmdir. 4821 */ 4822void 4823softdep_revert_rmdir(dp, ip) 4824 struct inode *dp; 4825 struct inode *ip; 4826{ 4827 4828 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0, 4829 ("softdep_revert_rmdir called on non-softdep filesystem")); 4830 ACQUIRE_LOCK(dp->i_ump); 4831 (void) inodedep_lookup_ip(ip); 4832 (void) inodedep_lookup_ip(dp); 4833 FREE_LOCK(dp->i_ump); 4834} 4835 4836/* 4837 * Protecting the freemaps (or bitmaps). 4838 * 4839 * To eliminate the need to execute fsck before mounting a filesystem 4840 * after a power failure, one must (conservatively) guarantee that the 4841 * on-disk copy of the bitmaps never indicate that a live inode or block is 4842 * free. So, when a block or inode is allocated, the bitmap should be 4843 * updated (on disk) before any new pointers. When a block or inode is 4844 * freed, the bitmap should not be updated until all pointers have been 4845 * reset. The latter dependency is handled by the delayed de-allocation 4846 * approach described below for block and inode de-allocation. The former 4847 * dependency is handled by calling the following procedure when a block or 4848 * inode is allocated. When an inode is allocated an "inodedep" is created 4849 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk. 4850 * Each "inodedep" is also inserted into the hash indexing structure so 4851 * that any additional link additions can be made dependent on the inode 4852 * allocation. 4853 * 4854 * The ufs filesystem maintains a number of free block counts (e.g., per 4855 * cylinder group, per cylinder and per <cylinder, rotational position> pair) 4856 * in addition to the bitmaps. These counts are used to improve efficiency 4857 * during allocation and therefore must be consistent with the bitmaps. 4858 * There is no convenient way to guarantee post-crash consistency of these 4859 * counts with simple update ordering, for two main reasons: (1) The counts 4860 * and bitmaps for a single cylinder group block are not in the same disk 4861 * sector. If a disk write is interrupted (e.g., by power failure), one may 4862 * be written and the other not. 
(2) Some of the counts are located in the
4862 * superblock rather than the cylinder group block. So, we focus our soft
4863 * updates implementation on protecting the bitmaps. When mounting a
4864 * filesystem, we recompute the auxiliary counts from the bitmaps.
4865 */
4866
4867
4868/*
4869 * Called just after updating the cylinder group block to allocate an inode.
4870 */
4871void
4872softdep_setup_inomapdep(bp, ip, newinum, mode)
4873 struct buf *bp; /* buffer for cylgroup block with inode map */
4874 struct inode *ip; /* inode related to allocation */
4875 ino_t newinum; /* new inode number being allocated */
4876 int mode;
4877{
4878 struct inodedep *inodedep;
4879 struct bmsafemap *bmsafemap;
4880 struct jaddref *jaddref;
4881 struct mount *mp;
4882 struct fs *fs;
4883
4884 mp = UFSTOVFS(ip->i_ump);
4885 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
4886 ("softdep_setup_inomapdep called on non-softdep filesystem"));
4887 fs = ip->i_ump->um_fs;
4888 jaddref = NULL;
4889
4890 /*
4891 * Allocate the journal reference add structure so that the bitmap
4892 * can be dependent on it.
4893 */
4894 if (MOUNTEDSUJ(mp)) {
4895 jaddref = newjaddref(ip, newinum, 0, 0, mode);
4896 jaddref->ja_state |= NEWBLOCK;
4897 }
4898
4899 /*
4900 * Create a dependency for the newly allocated inode.
4901 * Panic if it already exists as something is seriously wrong.
4902 * Otherwise add it to the dependency list for the buffer holding
4903 * the cylinder group map from which it was allocated.
4904 *
4905 * We have to preallocate a bmsafemap entry in case it is needed
4906 * in bmsafemap_lookup since once we allocate the inodedep, we
4907 * have to finish initializing it before we can FREE_LOCK().
4908 * By preallocating, we avoid having to FREE_LOCK() while doing a
4909 * malloc in bmsafemap_lookup. We cannot call bmsafemap_lookup before
4910 * creating the inodedep as it can be freed during the time
4911 * that we FREE_LOCK() while allocating the inodedep. We must
4912 * call workitem_alloc() before entering the locked section as
4913 * it also acquires the lock and we must avoid trying to do so
4914 * recursively.
4915 */
4916 bmsafemap = malloc(sizeof(struct bmsafemap),
4917 M_BMSAFEMAP, M_SOFTDEP_FLAGS);
4918 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
4919 ACQUIRE_LOCK(ip->i_ump);
4920 if ((inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep)))
4921 panic("softdep_setup_inomapdep: dependency %p for new "
4922 "inode already exists", inodedep);
4923 bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
4924 if (jaddref) {
4925 LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
4926 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4927 if_deps);
4928 } else {
4929 inodedep->id_state |= ONDEPLIST;
4930 LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
4931 }
4932 inodedep->id_bmsafemap = bmsafemap;
4933 inodedep->id_state &= ~DEPCOMPLETE;
4934 FREE_LOCK(ip->i_ump);
4935}
4936
4937/*
4938 * Called just after updating the cylinder group block to
4939 * allocate block or fragment.
4940 */
4941void
4942softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
4943 struct buf *bp; /* buffer for cylgroup block with block map */
4944 struct mount *mp; /* filesystem doing allocation */
4945 ufs2_daddr_t newblkno; /* number of newly allocated block */
4946 int frags; /* Number of fragments. */
4947 int oldfrags; /* Previous number of fragments for extend.
*/ 4948{ 4949 struct newblk *newblk; 4950 struct bmsafemap *bmsafemap; 4951 struct jnewblk *jnewblk; 4952 struct ufsmount *ump; 4953 struct fs *fs; 4954 4955 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 4956 ("softdep_setup_blkmapdep called on non-softdep filesystem")); 4957 ump = VFSTOUFS(mp); 4958 fs = ump->um_fs; 4959 jnewblk = NULL; 4960 /* 4961 * Create a dependency for the newly allocated block. 4962 * Add it to the dependency list for the buffer holding 4963 * the cylinder group map from which it was allocated. 4964 */ 4965 if (MOUNTEDSUJ(mp)) { 4966 jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS); 4967 workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp); 4968 jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list); 4969 jnewblk->jn_state = ATTACHED; 4970 jnewblk->jn_blkno = newblkno; 4971 jnewblk->jn_frags = frags; 4972 jnewblk->jn_oldfrags = oldfrags; 4973#ifdef SUJ_DEBUG 4974 { 4975 struct cg *cgp; 4976 uint8_t *blksfree; 4977 long bno; 4978 int i; 4979 4980 cgp = (struct cg *)bp->b_data; 4981 blksfree = cg_blksfree(cgp); 4982 bno = dtogd(fs, jnewblk->jn_blkno); 4983 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; 4984 i++) { 4985 if (isset(blksfree, bno + i)) 4986 panic("softdep_setup_blkmapdep: " 4987 "free fragment %d from %d-%d " 4988 "state 0x%X dep %p", i, 4989 jnewblk->jn_oldfrags, 4990 jnewblk->jn_frags, 4991 jnewblk->jn_state, 4992 jnewblk->jn_dep); 4993 } 4994 } 4995#endif 4996 } 4997 4998 CTR3(KTR_SUJ, 4999 "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d", 5000 newblkno, frags, oldfrags); 5001 ACQUIRE_LOCK(ump); 5002 if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0) 5003 panic("softdep_setup_blkmapdep: found block"); 5004 newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp, 5005 dtog(fs, newblkno), NULL); 5006 if (jnewblk) { 5007 jnewblk->jn_dep = (struct worklist *)newblk; 5008 LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps); 5009 } else { 5010 newblk->nb_state |= ONDEPLIST; 5011 LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps); 5012 } 5013 newblk->nb_bmsafemap = bmsafemap; 5014 newblk->nb_jnewblk = jnewblk; 5015 FREE_LOCK(ump); 5016} 5017 5018#define BMSAFEMAP_HASH(ump, cg) \ 5019 (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size]) 5020 5021static int 5022bmsafemap_find(bmsafemaphd, cg, bmsafemapp) 5023 struct bmsafemap_hashhead *bmsafemaphd; 5024 int cg; 5025 struct bmsafemap **bmsafemapp; 5026{ 5027 struct bmsafemap *bmsafemap; 5028 5029 LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash) 5030 if (bmsafemap->sm_cg == cg) 5031 break; 5032 if (bmsafemap) { 5033 *bmsafemapp = bmsafemap; 5034 return (1); 5035 } 5036 *bmsafemapp = NULL; 5037 5038 return (0); 5039} 5040 5041/* 5042 * Find the bmsafemap associated with a cylinder group buffer. 5043 * If none exists, create one. The buffer must be locked when 5044 * this routine is called and this routine must be called with 5045 * the softdep lock held. To avoid giving up the lock while 5046 * allocating a new bmsafemap, a preallocated bmsafemap may be 5047 * provided. If it is provided but not needed, it is freed. 
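 *
 * The allocation below follows a drop-and-recheck pattern: when no
 * preallocated bmsafemap was passed in, the lock is released around
 * malloc() and the hash chain is searched a second time afterwards,
 * discarding our copy if another thread raced in and inserted one
 * (the "collision" case).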
5048 */ 5049static struct bmsafemap * 5050bmsafemap_lookup(mp, bp, cg, newbmsafemap) 5051 struct mount *mp; 5052 struct buf *bp; 5053 int cg; 5054 struct bmsafemap *newbmsafemap; 5055{ 5056 struct bmsafemap_hashhead *bmsafemaphd; 5057 struct bmsafemap *bmsafemap, *collision; 5058 struct worklist *wk; 5059 struct ufsmount *ump; 5060 5061 ump = VFSTOUFS(mp); 5062 LOCK_OWNED(ump); 5063 KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer")); 5064 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5065 if (wk->wk_type == D_BMSAFEMAP) { 5066 if (newbmsafemap) 5067 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5068 return (WK_BMSAFEMAP(wk)); 5069 } 5070 } 5071 bmsafemaphd = BMSAFEMAP_HASH(ump, cg); 5072 if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) { 5073 if (newbmsafemap) 5074 WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP); 5075 return (bmsafemap); 5076 } 5077 if (newbmsafemap) { 5078 bmsafemap = newbmsafemap; 5079 } else { 5080 FREE_LOCK(ump); 5081 bmsafemap = malloc(sizeof(struct bmsafemap), 5082 M_BMSAFEMAP, M_SOFTDEP_FLAGS); 5083 workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp); 5084 ACQUIRE_LOCK(ump); 5085 } 5086 bmsafemap->sm_buf = bp; 5087 LIST_INIT(&bmsafemap->sm_inodedephd); 5088 LIST_INIT(&bmsafemap->sm_inodedepwr); 5089 LIST_INIT(&bmsafemap->sm_newblkhd); 5090 LIST_INIT(&bmsafemap->sm_newblkwr); 5091 LIST_INIT(&bmsafemap->sm_jaddrefhd); 5092 LIST_INIT(&bmsafemap->sm_jnewblkhd); 5093 LIST_INIT(&bmsafemap->sm_freehd); 5094 LIST_INIT(&bmsafemap->sm_freewr); 5095 if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) { 5096 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 5097 return (collision); 5098 } 5099 bmsafemap->sm_cg = cg; 5100 LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash); 5101 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 5102 WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list); 5103 return (bmsafemap); 5104} 5105 5106/* 5107 * Direct block allocation dependencies. 5108 * 5109 * When a new block is allocated, the corresponding disk locations must be 5110 * initialized (with zeros or new data) before the on-disk inode points to 5111 * them. Also, the freemap from which the block was allocated must be 5112 * updated (on disk) before the inode's pointer. These two dependencies are 5113 * independent of each other and are needed for all file blocks and indirect 5114 * blocks that are pointed to directly by the inode. Just before the 5115 * "in-core" version of the inode is updated with a newly allocated block 5116 * number, a procedure (below) is called to setup allocation dependency 5117 * structures. These structures are removed when the corresponding 5118 * dependencies are satisfied or when the block allocation becomes obsolete 5119 * (i.e., the file is deleted, the block is de-allocated, or the block is a 5120 * fragment that gets upgraded). All of these cases are handled in 5121 * procedures described later. 5122 * 5123 * When a file extension causes a fragment to be upgraded, either to a larger 5124 * fragment or to a full block, the on-disk location may change (if the 5125 * previous fragment could not simply be extended). In this case, the old 5126 * fragment must be de-allocated, but not until after the inode's pointer has 5127 * been updated. In most cases, this is handled by later procedures, which 5128 * will construct a "freefrag" structure to be added to the workitem queue 5129 * when the inode update is complete (or obsolete). 
The main exception to
5130 * this is when an allocation occurs while a pending allocation dependency
5131 * (for the same block pointer) remains. This case is handled in the main
5132 * allocation dependency setup procedure by immediately freeing the
5133 * unreferenced fragments.
5134 */
5135void
5136softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5137 struct inode *ip; /* inode to which block is being added */
5138 ufs_lbn_t off; /* block pointer within inode */
5139 ufs2_daddr_t newblkno; /* disk block number being added */
5140 ufs2_daddr_t oldblkno; /* previous block number, 0 unless frag */
5141 long newsize; /* size of new block */
5142 long oldsize; /* size of old block */
5143 struct buf *bp; /* bp for allocated block */
5144{
5145 struct allocdirect *adp, *oldadp;
5146 struct allocdirectlst *adphead;
5147 struct freefrag *freefrag;
5148 struct inodedep *inodedep;
5149 struct pagedep *pagedep;
5150 struct jnewblk *jnewblk;
5151 struct newblk *newblk;
5152 struct mount *mp;
5153 ufs_lbn_t lbn;
5154
5155 lbn = bp->b_lblkno;
5156 mp = UFSTOVFS(ip->i_ump);
5157 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5158 ("softdep_setup_allocdirect called on non-softdep filesystem"));
5159 if (oldblkno && oldblkno != newblkno)
5160 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5161 else
5162 freefrag = NULL;
5163
5164 CTR6(KTR_SUJ,
5165 "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5166 "off %jd newsize %ld oldsize %ld",
5167 ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5168 ACQUIRE_LOCK(ip->i_ump);
5169 if (off >= NDADDR) {
5170 if (lbn > 0)
5171 panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5172 lbn, off);
5173 /* allocating an indirect block */
5174 if (oldblkno != 0)
5175 panic("softdep_setup_allocdirect: non-zero indir");
5176 } else {
5177 if (off != lbn)
5178 panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5179 lbn, off);
5180 /*
5181 * Allocating a direct block.
5182 *
5183 * If we are allocating a directory block, then we must
5184 * allocate an associated pagedep to track additions and
5185 * deletions.
5186 */
5187 if ((ip->i_mode & IFMT) == IFDIR)
5188 pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5189 &pagedep);
5190 }
5191 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5192 panic("softdep_setup_allocdirect: lost block");
5193 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5194 ("softdep_setup_allocdirect: newblk already initialized"));
5195 /*
5196 * Convert the newblk to an allocdirect.
5197 */
5198 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5199 adp = (struct allocdirect *)newblk;
5200 newblk->nb_freefrag = freefrag;
5201 adp->ad_offset = off;
5202 adp->ad_oldblkno = oldblkno;
5203 adp->ad_newsize = newsize;
5204 adp->ad_oldsize = oldsize;
5205
5206 /*
5207 * Finish initializing the journal.
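 * The jnewblk was created in softdep_setup_blkmapdep() knowing only
 * the disk block; its owning inode and logical block number are
 * filled in here before the record is queued for the journal.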
5208 */ 5209 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5210 jnewblk->jn_ino = ip->i_number; 5211 jnewblk->jn_lbn = lbn; 5212 add_to_journal(&jnewblk->jn_list); 5213 } 5214 if (freefrag && freefrag->ff_jdep != NULL && 5215 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5216 add_to_journal(freefrag->ff_jdep); 5217 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep); 5218 adp->ad_inodedep = inodedep; 5219 5220 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5221 /* 5222 * The list of allocdirects must be kept in sorted and ascending 5223 * order so that the rollback routines can quickly determine the 5224 * first uncommitted block (the size of the file stored on disk 5225 * ends at the end of the lowest committed fragment, or if there 5226 * are no fragments, at the end of the highest committed block). 5227 * Since files generally grow, the typical case is that the new 5228 * block is to be added at the end of the list. We speed this 5229 * special case by checking against the last allocdirect in the 5230 * list before laboriously traversing the list looking for the 5231 * insertion point. 5232 */ 5233 adphead = &inodedep->id_newinoupdt; 5234 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5235 if (oldadp == NULL || oldadp->ad_offset <= off) { 5236 /* insert at end of list */ 5237 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5238 if (oldadp != NULL && oldadp->ad_offset == off) 5239 allocdirect_merge(adphead, adp, oldadp); 5240 FREE_LOCK(ip->i_ump); 5241 return; 5242 } 5243 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5244 if (oldadp->ad_offset >= off) 5245 break; 5246 } 5247 if (oldadp == NULL) 5248 panic("softdep_setup_allocdirect: lost entry"); 5249 /* insert in middle of list */ 5250 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5251 if (oldadp->ad_offset == off) 5252 allocdirect_merge(adphead, adp, oldadp); 5253 5254 FREE_LOCK(ip->i_ump); 5255} 5256 5257/* 5258 * Merge a newer and older journal record to be stored either in a 5259 * newblock or freefrag. This handles aggregating journal records for 5260 * fragment allocation into a second record as well as replacing a 5261 * journal free with an aborted journal allocation. A segment for the 5262 * oldest record will be placed on wkhd if it has been written. If not 5263 * the segment for the newer record will suffice. 5264 */ 5265static struct worklist * 5266jnewblk_merge(new, old, wkhd) 5267 struct worklist *new; 5268 struct worklist *old; 5269 struct workhead *wkhd; 5270{ 5271 struct jnewblk *njnewblk; 5272 struct jnewblk *jnewblk; 5273 5274 /* Handle NULLs to simplify callers. */ 5275 if (new == NULL) 5276 return (old); 5277 if (old == NULL) 5278 return (new); 5279 /* Replace a jfreefrag with a jnewblk. */ 5280 if (new->wk_type == D_JFREEFRAG) { 5281 if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno) 5282 panic("jnewblk_merge: blkno mismatch: %p, %p", 5283 old, new); 5284 cancel_jfreefrag(WK_JFREEFRAG(new)); 5285 return (old); 5286 } 5287 if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK) 5288 panic("jnewblk_merge: Bad type: old %d new %d\n", 5289 old->wk_type, new->wk_type); 5290 /* 5291 * Handle merging of two jnewblk records that describe 5292 * different sets of fragments in the same block. 5293 */ 5294 jnewblk = WK_JNEWBLK(old); 5295 njnewblk = WK_JNEWBLK(new); 5296 if (jnewblk->jn_blkno != njnewblk->jn_blkno) 5297 panic("jnewblk_merge: Merging disparate blocks."); 5298 /* 5299 * The record may be rolled back in the cg. 
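 * If the older record had been rolled back in the cylinder group
 * (UNDONE), that state is transferred to the surviving newer record
 * so the rollback is not lost when the older record is freed below.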
5300 */ 5301 if (jnewblk->jn_state & UNDONE) { 5302 jnewblk->jn_state &= ~UNDONE; 5303 njnewblk->jn_state |= UNDONE; 5304 njnewblk->jn_state &= ~ATTACHED; 5305 } 5306 /* 5307 * We modify the newer addref and free the older so that if neither 5308 * has been written the most up-to-date copy will be on disk. If 5309 * both have been written but rolled back we only temporarily need 5310 * one of them to fix the bits when the cg write completes. 5311 */ 5312 jnewblk->jn_state |= ATTACHED | COMPLETE; 5313 njnewblk->jn_oldfrags = jnewblk->jn_oldfrags; 5314 cancel_jnewblk(jnewblk, wkhd); 5315 WORKLIST_REMOVE(&jnewblk->jn_list); 5316 free_jnewblk(jnewblk); 5317 return (new); 5318} 5319 5320/* 5321 * Replace an old allocdirect dependency with a newer one. 5322 * This routine must be called with splbio interrupts blocked. 5323 */ 5324static void 5325allocdirect_merge(adphead, newadp, oldadp) 5326 struct allocdirectlst *adphead; /* head of list holding allocdirects */ 5327 struct allocdirect *newadp; /* allocdirect being added */ 5328 struct allocdirect *oldadp; /* existing allocdirect being checked */ 5329{ 5330 struct worklist *wk; 5331 struct freefrag *freefrag; 5332 5333 freefrag = NULL; 5334 LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp)); 5335 if (newadp->ad_oldblkno != oldadp->ad_newblkno || 5336 newadp->ad_oldsize != oldadp->ad_newsize || 5337 newadp->ad_offset >= NDADDR) 5338 panic("%s %jd != new %jd || old size %ld != new %ld", 5339 "allocdirect_merge: old blkno", 5340 (intmax_t)newadp->ad_oldblkno, 5341 (intmax_t)oldadp->ad_newblkno, 5342 newadp->ad_oldsize, oldadp->ad_newsize); 5343 newadp->ad_oldblkno = oldadp->ad_oldblkno; 5344 newadp->ad_oldsize = oldadp->ad_oldsize; 5345 /* 5346 * If the old dependency had a fragment to free or had never 5347 * previously had a block allocated, then the new dependency 5348 * can immediately post its freefrag and adopt the old freefrag. 5349 * This action is done by swapping the freefrag dependencies. 5350 * The new dependency gains the old one's freefrag, and the 5351 * old one gets the new one and then immediately puts it on 5352 * the worklist when it is freed by free_newblk. It is 5353 * not possible to do this swap when the old dependency had a 5354 * non-zero size but no previous fragment to free. This condition 5355 * arises when the new block is an extension of the old block. 5356 * Here, the first part of the fragment allocated to the new 5357 * dependency is part of the block currently claimed on disk by 5358 * the old dependency, so cannot legitimately be freed until the 5359 * conditions for the new dependency are fulfilled. 5360 */ 5361 freefrag = newadp->ad_freefrag; 5362 if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) { 5363 newadp->ad_freefrag = oldadp->ad_freefrag; 5364 oldadp->ad_freefrag = freefrag; 5365 } 5366 /* 5367 * If we are tracking a new directory-block allocation, 5368 * move it from the old allocdirect to the new allocdirect. 5369 */ 5370 if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) { 5371 WORKLIST_REMOVE(wk); 5372 if (!LIST_EMPTY(&oldadp->ad_newdirblk)) 5373 panic("allocdirect_merge: extra newdirblk"); 5374 WORKLIST_INSERT(&newadp->ad_newdirblk, wk); 5375 } 5376 TAILQ_REMOVE(adphead, oldadp, ad_next); 5377 /* 5378 * We need to move any journal dependencies over to the freefrag 5379 * that releases this block if it exists. 
Otherwise we are 5380 * extending an existing block and we'll wait until that is 5381 * complete to release the journal space and extend the 5382 * new journal to cover this old space as well. 5383 */ 5384 if (freefrag == NULL) { 5385 if (oldadp->ad_newblkno != newadp->ad_newblkno) 5386 panic("allocdirect_merge: %jd != %jd", 5387 oldadp->ad_newblkno, newadp->ad_newblkno); 5388 newadp->ad_block.nb_jnewblk = (struct jnewblk *) 5389 jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list, 5390 &oldadp->ad_block.nb_jnewblk->jn_list, 5391 &newadp->ad_block.nb_jwork); 5392 oldadp->ad_block.nb_jnewblk = NULL; 5393 cancel_newblk(&oldadp->ad_block, NULL, 5394 &newadp->ad_block.nb_jwork); 5395 } else { 5396 wk = (struct worklist *) cancel_newblk(&oldadp->ad_block, 5397 &freefrag->ff_list, &freefrag->ff_jwork); 5398 freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk, 5399 &freefrag->ff_jwork); 5400 } 5401 free_newblk(&oldadp->ad_block); 5402} 5403 5404/* 5405 * Allocate a jfreefrag structure to journal a single block free. 5406 */ 5407static struct jfreefrag * 5408newjfreefrag(freefrag, ip, blkno, size, lbn) 5409 struct freefrag *freefrag; 5410 struct inode *ip; 5411 ufs2_daddr_t blkno; 5412 long size; 5413 ufs_lbn_t lbn; 5414{ 5415 struct jfreefrag *jfreefrag; 5416 struct fs *fs; 5417 5418 fs = ip->i_fs; 5419 jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG, 5420 M_SOFTDEP_FLAGS); 5421 workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump)); 5422 jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list); 5423 jfreefrag->fr_state = ATTACHED | DEPCOMPLETE; 5424 jfreefrag->fr_ino = ip->i_number; 5425 jfreefrag->fr_lbn = lbn; 5426 jfreefrag->fr_blkno = blkno; 5427 jfreefrag->fr_frags = numfrags(fs, size); 5428 jfreefrag->fr_freefrag = freefrag; 5429 5430 return (jfreefrag); 5431} 5432 5433/* 5434 * Allocate a new freefrag structure. 5435 */ 5436static struct freefrag * 5437newfreefrag(ip, blkno, size, lbn) 5438 struct inode *ip; 5439 ufs2_daddr_t blkno; 5440 long size; 5441 ufs_lbn_t lbn; 5442{ 5443 struct freefrag *freefrag; 5444 struct fs *fs; 5445 5446 CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd", 5447 ip->i_number, blkno, size, lbn); 5448 fs = ip->i_fs; 5449 if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag) 5450 panic("newfreefrag: frag size"); 5451 freefrag = malloc(sizeof(struct freefrag), 5452 M_FREEFRAG, M_SOFTDEP_FLAGS); 5453 workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump)); 5454 freefrag->ff_state = ATTACHED; 5455 LIST_INIT(&freefrag->ff_jwork); 5456 freefrag->ff_inum = ip->i_number; 5457 freefrag->ff_vtype = ITOV(ip)->v_type; 5458 freefrag->ff_blkno = blkno; 5459 freefrag->ff_fragsize = size; 5460 5461 if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) { 5462 freefrag->ff_jdep = (struct worklist *) 5463 newjfreefrag(freefrag, ip, blkno, size, lbn); 5464 } else { 5465 freefrag->ff_state |= DEPCOMPLETE; 5466 freefrag->ff_jdep = NULL; 5467 } 5468 5469 return (freefrag); 5470} 5471 5472/* 5473 * This workitem de-allocates fragments that were replaced during 5474 * file block allocation. 
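 *
 * In outline (per the body below): the pending journal work is moved to
 * a local list, any still-unwritten journal record for the block is
 * canceled, and the list is handed to ffs_blkfree() so the free is
 * processed together with its remaining journal dependencies.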
5475 */
5476static void
5477handle_workitem_freefrag(freefrag)
5478 struct freefrag *freefrag;
5479{
5480 struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5481 struct workhead wkhd;
5482
5483 CTR3(KTR_SUJ,
5484 "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5485 freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5486 /*
5487 * It would be illegal to add new completion items to the
5488 * freefrag after it was scheduled to be done, so it must be
5489 * safe to modify the list head here.
5490 */
5491 LIST_INIT(&wkhd);
5492 ACQUIRE_LOCK(ump);
5493 LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5494 /*
5495 * If the journal has not been written we must cancel it here.
5496 */
5497 if (freefrag->ff_jdep) {
5498 if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5499 panic("handle_workitem_freefrag: Unexpected type %d\n",
5500 freefrag->ff_jdep->wk_type);
5501 cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5502 }
5503 FREE_LOCK(ump);
5504 ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5505 freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5506 ACQUIRE_LOCK(ump);
5507 WORKITEM_FREE(freefrag, D_FREEFRAG);
5508 FREE_LOCK(ump);
5509}
5510
5511/*
5512 * Set up a dependency structure for an extended attributes data block.
5513 * This routine follows much of the structure of softdep_setup_allocdirect.
5514 * See the description of softdep_setup_allocdirect above for details.
5515 */
5516void
5517softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5518 struct inode *ip;
5519 ufs_lbn_t off;
5520 ufs2_daddr_t newblkno;
5521 ufs2_daddr_t oldblkno;
5522 long newsize;
5523 long oldsize;
5524 struct buf *bp;
5525{
5526 struct allocdirect *adp, *oldadp;
5527 struct allocdirectlst *adphead;
5528 struct freefrag *freefrag;
5529 struct inodedep *inodedep;
5530 struct jnewblk *jnewblk;
5531 struct newblk *newblk;
5532 struct mount *mp;
5533 ufs_lbn_t lbn;
5534
5535 mp = UFSTOVFS(ip->i_ump);
5536 KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5537 ("softdep_setup_allocext called on non-softdep filesystem"));
5538 KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld >= NXADDR",
5539 (long long)off));
5540
5541 lbn = bp->b_lblkno;
5542 if (oldblkno && oldblkno != newblkno)
5543 freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5544 else
5545 freefrag = NULL;
5546
5547 ACQUIRE_LOCK(ip->i_ump);
5548 if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5549 panic("softdep_setup_allocext: lost block");
5550 KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5551 ("softdep_setup_allocext: newblk already initialized"));
5552 /*
5553 * Convert the newblk to an allocdirect.
5554 */
5555 WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5556 adp = (struct allocdirect *)newblk;
5557 newblk->nb_freefrag = freefrag;
5558 adp->ad_offset = off;
5559 adp->ad_oldblkno = oldblkno;
5560 adp->ad_newsize = newsize;
5561 adp->ad_oldsize = oldsize;
5562 adp->ad_state |= EXTDATA;
5563
5564 /*
5565 * Finish initializing the journal.
5566 */ 5567 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5568 jnewblk->jn_ino = ip->i_number; 5569 jnewblk->jn_lbn = lbn; 5570 add_to_journal(&jnewblk->jn_list); 5571 } 5572 if (freefrag && freefrag->ff_jdep != NULL && 5573 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5574 add_to_journal(freefrag->ff_jdep); 5575 inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep); 5576 adp->ad_inodedep = inodedep; 5577 5578 WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list); 5579 /* 5580 * The list of allocdirects must be kept in sorted and ascending 5581 * order so that the rollback routines can quickly determine the 5582 * first uncommitted block (the size of the file stored on disk 5583 * ends at the end of the lowest committed fragment, or if there 5584 * are no fragments, at the end of the highest committed block). 5585 * Since files generally grow, the typical case is that the new 5586 * block is to be added at the end of the list. We speed this 5587 * special case by checking against the last allocdirect in the 5588 * list before laboriously traversing the list looking for the 5589 * insertion point. 5590 */ 5591 adphead = &inodedep->id_newextupdt; 5592 oldadp = TAILQ_LAST(adphead, allocdirectlst); 5593 if (oldadp == NULL || oldadp->ad_offset <= off) { 5594 /* insert at end of list */ 5595 TAILQ_INSERT_TAIL(adphead, adp, ad_next); 5596 if (oldadp != NULL && oldadp->ad_offset == off) 5597 allocdirect_merge(adphead, adp, oldadp); 5598 FREE_LOCK(ip->i_ump); 5599 return; 5600 } 5601 TAILQ_FOREACH(oldadp, adphead, ad_next) { 5602 if (oldadp->ad_offset >= off) 5603 break; 5604 } 5605 if (oldadp == NULL) 5606 panic("softdep_setup_allocext: lost entry"); 5607 /* insert in middle of list */ 5608 TAILQ_INSERT_BEFORE(oldadp, adp, ad_next); 5609 if (oldadp->ad_offset == off) 5610 allocdirect_merge(adphead, adp, oldadp); 5611 FREE_LOCK(ip->i_ump); 5612} 5613 5614/* 5615 * Indirect block allocation dependencies. 5616 * 5617 * The same dependencies that exist for a direct block also exist when 5618 * a new block is allocated and pointed to by an entry in a block of 5619 * indirect pointers. The undo/redo states described above are also 5620 * used here. Because an indirect block contains many pointers that 5621 * may have dependencies, a second copy of the entire in-memory indirect 5622 * block is kept. The buffer cache copy is always completely up-to-date. 5623 * The second copy, which is used only as a source for disk writes, 5624 * contains only the safe pointers (i.e., those that have no remaining 5625 * update dependencies). The second copy is freed when all pointers 5626 * are safe. The cache is not allowed to replace indirect blocks with 5627 * pending update dependencies. If a buffer containing an indirect 5628 * block with dependencies is written, these routines will mark it 5629 * dirty again. It can only be successfully written once all the 5630 * dependencies are removed. The ffs_fsync routine in conjunction with 5631 * softdep_sync_metadata work together to get all the dependencies 5632 * removed so that a file can be successfully written to disk. Three 5633 * procedures are used when setting up indirect block pointer 5634 * dependencies. The division is necessary because of the organization 5635 * of the "balloc" routine and because of the distinction between file 5636 * pages and file metadata blocks. 5637 */ 5638 5639/* 5640 * Allocate a new allocindir structure. 
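 *
 * Each allocindir tracks a single pointer update within one indirect
 * block. As a rough illustration (assuming UFS2 with 32768-byte
 * blocks), an indirect block holds NINDIR(fs) == 32768 / 8 == 4096
 * block pointers, and a separate allocindir keyed by ai_offset
 * (ptrno) is created for every pointer that has been changed but is
 * not yet safely written.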
5641 */ 5642static struct allocindir * 5643newallocindir(ip, ptrno, newblkno, oldblkno, lbn) 5644 struct inode *ip; /* inode for file being extended */ 5645 int ptrno; /* offset of pointer in indirect block */ 5646 ufs2_daddr_t newblkno; /* disk block number being added */ 5647 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5648 ufs_lbn_t lbn; 5649{ 5650 struct newblk *newblk; 5651 struct allocindir *aip; 5652 struct freefrag *freefrag; 5653 struct jnewblk *jnewblk; 5654 5655 if (oldblkno) 5656 freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn); 5657 else 5658 freefrag = NULL; 5659 ACQUIRE_LOCK(ip->i_ump); 5660 if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0) 5661 panic("new_allocindir: lost block"); 5662 KASSERT(newblk->nb_list.wk_type == D_NEWBLK, 5663 ("newallocindir: newblk already initialized")); 5664 WORKITEM_REASSIGN(newblk, D_ALLOCINDIR); 5665 newblk->nb_freefrag = freefrag; 5666 aip = (struct allocindir *)newblk; 5667 aip->ai_offset = ptrno; 5668 aip->ai_oldblkno = oldblkno; 5669 aip->ai_lbn = lbn; 5670 if ((jnewblk = newblk->nb_jnewblk) != NULL) { 5671 jnewblk->jn_ino = ip->i_number; 5672 jnewblk->jn_lbn = lbn; 5673 add_to_journal(&jnewblk->jn_list); 5674 } 5675 if (freefrag && freefrag->ff_jdep != NULL && 5676 freefrag->ff_jdep->wk_type == D_JFREEFRAG) 5677 add_to_journal(freefrag->ff_jdep); 5678 return (aip); 5679} 5680 5681/* 5682 * Called just before setting an indirect block pointer 5683 * to a newly allocated file page. 5684 */ 5685void 5686softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp) 5687 struct inode *ip; /* inode for file being extended */ 5688 ufs_lbn_t lbn; /* allocated block number within file */ 5689 struct buf *bp; /* buffer with indirect blk referencing page */ 5690 int ptrno; /* offset of pointer in indirect block */ 5691 ufs2_daddr_t newblkno; /* disk block number being added */ 5692 ufs2_daddr_t oldblkno; /* previous block number, 0 if none */ 5693 struct buf *nbp; /* buffer holding allocated page */ 5694{ 5695 struct inodedep *inodedep; 5696 struct freefrag *freefrag; 5697 struct allocindir *aip; 5698 struct pagedep *pagedep; 5699 struct mount *mp; 5700 int dflags; 5701 5702 mp = UFSTOVFS(ip->i_ump); 5703 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 5704 ("softdep_setup_allocindir_page called on non-softdep filesystem")); 5705 KASSERT(lbn == nbp->b_lblkno, 5706 ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd", 5707 lbn, bp->b_lblkno)); 5708 CTR4(KTR_SUJ, 5709 "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd " 5710 "lbn %jd", ip->i_number, newblkno, oldblkno, lbn); 5711 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page"); 5712 aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn); 5713 dflags = DEPALLOC; 5714 if (IS_SNAPSHOT(ip)) 5715 dflags |= NODELAY; 5716 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 5717 /* 5718 * If we are allocating a directory page, then we must 5719 * allocate an associated pagedep to track additions and 5720 * deletions. 5721 */ 5722 if ((ip->i_mode & IFMT) == IFDIR) 5723 pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep); 5724 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5725 freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn); 5726 FREE_LOCK(ip->i_ump); 5727 if (freefrag) 5728 handle_workitem_freefrag(freefrag); 5729} 5730 5731/* 5732 * Called just before setting an indirect block pointer to a 5733 * newly allocated indirect block. 
5734 */ 5735void 5736softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno) 5737 struct buf *nbp; /* newly allocated indirect block */ 5738 struct inode *ip; /* inode for file being extended */ 5739 struct buf *bp; /* indirect block referencing allocated block */ 5740 int ptrno; /* offset of pointer in indirect block */ 5741 ufs2_daddr_t newblkno; /* disk block number being added */ 5742{ 5743 struct inodedep *inodedep; 5744 struct allocindir *aip; 5745 ufs_lbn_t lbn; 5746 int dflags; 5747 5748 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 5749 ("softdep_setup_allocindir_meta called on non-softdep filesystem")); 5750 CTR3(KTR_SUJ, 5751 "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d", 5752 ip->i_number, newblkno, ptrno); 5753 lbn = nbp->b_lblkno; 5754 ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta"); 5755 aip = newallocindir(ip, ptrno, newblkno, 0, lbn); 5756 dflags = DEPALLOC; 5757 if (IS_SNAPSHOT(ip)) 5758 dflags |= NODELAY; 5759 inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep); 5760 WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list); 5761 if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)) 5762 panic("softdep_setup_allocindir_meta: Block already existed"); 5763 FREE_LOCK(ip->i_ump); 5764} 5765 5766static void 5767indirdep_complete(indirdep) 5768 struct indirdep *indirdep; 5769{ 5770 struct allocindir *aip; 5771 5772 LIST_REMOVE(indirdep, ir_next); 5773 indirdep->ir_state |= DEPCOMPLETE; 5774 5775 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) { 5776 LIST_REMOVE(aip, ai_next); 5777 free_newblk(&aip->ai_block); 5778 } 5779 /* 5780 * If this indirdep is not attached to a buf it was simply waiting 5781 * on completion to clear completehd. free_indirdep() asserts 5782 * that nothing is dangling. 5783 */ 5784 if ((indirdep->ir_state & ONWORKLIST) == 0) 5785 free_indirdep(indirdep); 5786} 5787 5788static struct indirdep * 5789indirdep_lookup(mp, ip, bp) 5790 struct mount *mp; 5791 struct inode *ip; 5792 struct buf *bp; 5793{ 5794 struct indirdep *indirdep, *newindirdep; 5795 struct newblk *newblk; 5796 struct ufsmount *ump; 5797 struct worklist *wk; 5798 struct fs *fs; 5799 ufs2_daddr_t blkno; 5800 5801 ump = VFSTOUFS(mp); 5802 LOCK_OWNED(ump); 5803 indirdep = NULL; 5804 newindirdep = NULL; 5805 fs = ip->i_fs; 5806 for (;;) { 5807 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 5808 if (wk->wk_type != D_INDIRDEP) 5809 continue; 5810 indirdep = WK_INDIRDEP(wk); 5811 break; 5812 } 5813 /* Found on the buffer worklist, no new structure to free. */ 5814 if (indirdep != NULL && newindirdep == NULL) 5815 return (indirdep); 5816 if (indirdep != NULL && newindirdep != NULL) 5817 panic("indirdep_lookup: simultaneous create"); 5818 /* None found on the buffer and a new structure is ready. */ 5819 if (indirdep == NULL && newindirdep != NULL) 5820 break; 5821 /* None found and no new structure available. 
*/ 5822 FREE_LOCK(ump); 5823 newindirdep = malloc(sizeof(struct indirdep), 5824 M_INDIRDEP, M_SOFTDEP_FLAGS); 5825 workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp); 5826 newindirdep->ir_state = ATTACHED; 5827 if (ip->i_ump->um_fstype == UFS1) 5828 newindirdep->ir_state |= UFS1FMT; 5829 TAILQ_INIT(&newindirdep->ir_trunc); 5830 newindirdep->ir_saveddata = NULL; 5831 LIST_INIT(&newindirdep->ir_deplisthd); 5832 LIST_INIT(&newindirdep->ir_donehd); 5833 LIST_INIT(&newindirdep->ir_writehd); 5834 LIST_INIT(&newindirdep->ir_completehd); 5835 if (bp->b_blkno == bp->b_lblkno) { 5836 ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp, 5837 NULL, NULL); 5838 bp->b_blkno = blkno; 5839 } 5840 newindirdep->ir_freeblks = NULL; 5841 newindirdep->ir_savebp = 5842 getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0); 5843 newindirdep->ir_bp = bp; 5844 BUF_KERNPROC(newindirdep->ir_savebp); 5845 bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount); 5846 ACQUIRE_LOCK(ump); 5847 } 5848 indirdep = newindirdep; 5849 WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list); 5850 /* 5851 * If the block is not yet allocated we don't set DEPCOMPLETE so 5852 * that we don't free dependencies until the pointers are valid. 5853 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather 5854 * than using the hash. 5855 */ 5856 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)) 5857 LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next); 5858 else 5859 indirdep->ir_state |= DEPCOMPLETE; 5860 return (indirdep); 5861} 5862 5863/* 5864 * Called to finish the allocation of the "aip" allocated 5865 * by one of the two routines above. 5866 */ 5867static struct freefrag * 5868setup_allocindir_phase2(bp, ip, inodedep, aip, lbn) 5869 struct buf *bp; /* in-memory copy of the indirect block */ 5870 struct inode *ip; /* inode for file being extended */ 5871 struct inodedep *inodedep; /* Inodedep for ip */ 5872 struct allocindir *aip; /* allocindir allocated by the above routines */ 5873 ufs_lbn_t lbn; /* Logical block number for this block. */ 5874{ 5875 struct fs *fs; 5876 struct indirdep *indirdep; 5877 struct allocindir *oldaip; 5878 struct freefrag *freefrag; 5879 struct mount *mp; 5880 5881 LOCK_OWNED(ip->i_ump); 5882 mp = UFSTOVFS(ip->i_ump); 5883 fs = ip->i_fs; 5884 if (bp->b_lblkno >= 0) 5885 panic("setup_allocindir_phase2: not indir blk"); 5886 KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs), 5887 ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset)); 5888 indirdep = indirdep_lookup(mp, ip, bp); 5889 KASSERT(indirdep->ir_savebp != NULL, 5890 ("setup_allocindir_phase2 NULL ir_savebp")); 5891 aip->ai_indirdep = indirdep; 5892 /* 5893 * Check for an unwritten dependency for this indirect offset. If 5894 * there is, merge the old dependency into the new one. This happens 5895 * as a result of reallocblk only. 5896 */ 5897 freefrag = NULL; 5898 if (aip->ai_oldblkno != 0) { 5899 LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) { 5900 if (oldaip->ai_offset == aip->ai_offset) { 5901 freefrag = allocindir_merge(aip, oldaip); 5902 goto done; 5903 } 5904 } 5905 LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) { 5906 if (oldaip->ai_offset == aip->ai_offset) { 5907 freefrag = allocindir_merge(aip, oldaip); 5908 goto done; 5909 } 5910 } 5911 } 5912done: 5913 LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next); 5914 return (freefrag); 5915} 5916 5917/* 5918 * Merge two allocindirs which refer to the same block. Move newblock 5919 * dependencies and setup the freefrags appropriately. 
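 *
 * This situation arises only from ffs_reallocblks(): for instance, if
 * the cluster code moved a block from address A to address B while
 * the pointer write naming A was still pending, the old allocindir
 * for A is canceled here and its journal work is carried on the
 * freefrag, so A is not released until B is safely recorded.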
5920 */ 5921static struct freefrag * 5922allocindir_merge(aip, oldaip) 5923 struct allocindir *aip; 5924 struct allocindir *oldaip; 5925{ 5926 struct freefrag *freefrag; 5927 struct worklist *wk; 5928 5929 if (oldaip->ai_newblkno != aip->ai_oldblkno) 5930 panic("allocindir_merge: blkno"); 5931 aip->ai_oldblkno = oldaip->ai_oldblkno; 5932 freefrag = aip->ai_freefrag; 5933 aip->ai_freefrag = oldaip->ai_freefrag; 5934 oldaip->ai_freefrag = NULL; 5935 KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag")); 5936 /* 5937 * If we are tracking a new directory-block allocation, 5938 * move it from the old allocindir to the new allocindir. 5939 */ 5940 if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) { 5941 WORKLIST_REMOVE(wk); 5942 if (!LIST_EMPTY(&oldaip->ai_newdirblk)) 5943 panic("allocindir_merge: extra newdirblk"); 5944 WORKLIST_INSERT(&aip->ai_newdirblk, wk); 5945 } 5946 /* 5947 * We can skip journaling for this freefrag and just complete 5948 * any pending journal work for the allocindir that is being 5949 * removed after the freefrag completes. 5950 */ 5951 if (freefrag->ff_jdep) 5952 cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep)); 5953 LIST_REMOVE(oldaip, ai_next); 5954 freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block, 5955 &freefrag->ff_list, &freefrag->ff_jwork); 5956 free_newblk(&oldaip->ai_block); 5957 5958 return (freefrag); 5959} 5960 5961static inline void 5962setup_freedirect(freeblks, ip, i, needj) 5963 struct freeblks *freeblks; 5964 struct inode *ip; 5965 int i; 5966 int needj; 5967{ 5968 ufs2_daddr_t blkno; 5969 int frags; 5970 5971 blkno = DIP(ip, i_db[i]); 5972 if (blkno == 0) 5973 return; 5974 DIP_SET(ip, i_db[i], 0); 5975 frags = sblksize(ip->i_fs, ip->i_size, i); 5976 frags = numfrags(ip->i_fs, frags); 5977 newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj); 5978} 5979 5980static inline void 5981setup_freeext(freeblks, ip, i, needj) 5982 struct freeblks *freeblks; 5983 struct inode *ip; 5984 int i; 5985 int needj; 5986{ 5987 ufs2_daddr_t blkno; 5988 int frags; 5989 5990 blkno = ip->i_din2->di_extb[i]; 5991 if (blkno == 0) 5992 return; 5993 ip->i_din2->di_extb[i] = 0; 5994 frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i); 5995 frags = numfrags(ip->i_fs, frags); 5996 newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj); 5997} 5998 5999static inline void 6000setup_freeindir(freeblks, ip, i, lbn, needj) 6001 struct freeblks *freeblks; 6002 struct inode *ip; 6003 int i; 6004 ufs_lbn_t lbn; 6005 int needj; 6006{ 6007 ufs2_daddr_t blkno; 6008 6009 blkno = DIP(ip, i_ib[i]); 6010 if (blkno == 0) 6011 return; 6012 DIP_SET(ip, i_ib[i], 0); 6013 newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag, 6014 0, needj); 6015} 6016 6017static inline struct freeblks * 6018newfreeblks(mp, ip) 6019 struct mount *mp; 6020 struct inode *ip; 6021{ 6022 struct freeblks *freeblks; 6023 6024 freeblks = malloc(sizeof(struct freeblks), 6025 M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO); 6026 workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp); 6027 LIST_INIT(&freeblks->fb_jblkdephd); 6028 LIST_INIT(&freeblks->fb_jwork); 6029 freeblks->fb_ref = 0; 6030 freeblks->fb_cgwait = 0; 6031 freeblks->fb_state = ATTACHED; 6032 freeblks->fb_uid = ip->i_uid; 6033 freeblks->fb_inum = ip->i_number; 6034 freeblks->fb_vtype = ITOV(ip)->v_type; 6035 freeblks->fb_modrev = DIP(ip, i_modrev); 6036 freeblks->fb_devvp = ip->i_devvp; 6037 freeblks->fb_chkcnt = 0; 6038 freeblks->fb_len = 0; 6039 6040 return (freeblks); 6041} 6042 6043static 
void
trunc_indirdep(indirdep, freeblks, bp, off)
	struct indirdep *indirdep;
	struct freeblks *freeblks;
	struct buf *bp;
	int off;
{
	struct allocindir *aip, *aipn;

	/*
	 * The first set of allocindirs won't be in savedbp.
	 */
	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, bp, freeblks, 1);
	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, bp, freeblks, 1);
	/*
	 * These will exist in savedbp.
	 */
	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, NULL, freeblks, 0);
	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
		if (aip->ai_offset > off)
			cancel_allocindir(aip, NULL, freeblks, 0);
}

/*
 * Follow the chain of indirects down to lastlbn creating a freework
 * structure for each. This will be used to start indir_trunc() at
 * the right offset and create the journal records for the partial
 * truncation. A second step will handle the truncated dependencies.
 */
static int
setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
	struct freeblks *freeblks;
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs_lbn_t lastlbn;
	ufs2_daddr_t blkno;
{
	struct indirdep *indirdep;
	struct indirdep *indirn;
	struct freework *freework;
	struct newblk *newblk;
	struct mount *mp;
	struct buf *bp;
	uint8_t *start;
	uint8_t *end;
	ufs_lbn_t lbnadd;
	int level;
	int error;
	int off;

	freework = NULL;
	if (blkno == 0)
		return (0);
	mp = freeblks->fb_list.wk_mp;
	bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0) {
		bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
		bp->b_iocmd = BIO_READ;
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		vfs_busy_pages(bp, 0);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		curthread->td_ru.ru_inblock++;
		error = bufwait(bp);
		if (error) {
			brelse(bp);
			return (error);
		}
	}
	level = lbn_level(lbn);
	lbnadd = lbn_offset(ip->i_fs, level);
	/*
	 * Compute the offset of the last block we want to keep. Store
	 * in the freework the first block we want to completely free.
	 */
	off = (lastlbn - -(lbn + level)) / lbnadd;
	if (off + 1 == NINDIR(ip->i_fs))
		goto nowork;
	freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
	    0);
	/*
	 * Link the freework into the indirdep. This will prevent any new
	 * allocations from proceeding until we are finished with the
	 * truncate and the block is written.
	 */
	ACQUIRE_LOCK(ip->i_ump);
	indirdep = indirdep_lookup(mp, ip, bp);
	if (indirdep->ir_freeblks)
		panic("setup_trunc_indir: indirdep already truncated.");
	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
	freework->fw_indir = indirdep;
	/*
	 * Cancel any allocindirs that will not make it to disk.
	 * We have to do this for all copies of the indirdep that
	 * live on this newblk.
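	 *
	 * A worked example of the arithmetic above (hypothetical layout
	 * with NDADDR == 12 and NINDIR(fs) == 4096): truncating so that
	 * lastlbn is 100 visits the first single indirect, passed in as
	 * lbn -12 with level 0 and lbnadd 1, giving off =
	 * (100 - 12) / 1 = 88. Pointer 88 (file lbn 100) is the last one
	 * kept, the freework records off + 1 == 89 as the first slot to
	 * free, and trunc_indirdep() below cancels any allocindir with
	 * ai_offset > 88.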
	 */
	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
		newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
			trunc_indirdep(indirn, freeblks, bp, off);
	} else
		trunc_indirdep(indirdep, freeblks, bp, off);
	FREE_LOCK(ip->i_ump);
	/*
	 * Creation is protected by the buf lock. The saveddata is only
	 * needed if a full truncation follows a partial truncation but it
	 * is difficult to allocate in that case so we fetch it anyway.
	 */
	if (indirdep->ir_saveddata == NULL)
		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
		    M_SOFTDEP_FLAGS);
nowork:
	/* Fetch the blkno of the child and the zero start offset. */
	if (ip->i_ump->um_fstype == UFS1) {
		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
	} else {
		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
	}
	if (freework) {
		/* Zero the truncated pointers. */
		end = bp->b_data + bp->b_bcount;
		bzero(start, end - start);
		bdwrite(bp);
	} else
		bqrelse(bp);
	if (level == 0)
		return (0);
	lbn++; /* adjust level */
	lbn -= (off * lbnadd);
	return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
}

/*
 * Complete the partial truncation of an indirect block setup by
 * setup_trunc_indir(). This zeros the truncated pointers in the saved
 * copy and writes them to disk before the freeblks is allowed to complete.
 */
static void
complete_trunc_indir(freework)
	struct freework *freework;
{
	struct freework *fwn;
	struct indirdep *indirdep;
	struct ufsmount *ump;
	struct buf *bp;
	uintptr_t start;
	int count;

	ump = VFSTOUFS(freework->fw_list.wk_mp);
	LOCK_OWNED(ump);
	indirdep = freework->fw_indir;
	for (;;) {
		bp = indirdep->ir_bp;
		/* See if the block was discarded. */
		if (bp == NULL)
			break;
		/* Inline part of getdirtybuf(). We don't want bremfree. */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
			break;
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
		    LOCK_PTR(ump)) == 0)
			BUF_UNLOCK(bp);
		ACQUIRE_LOCK(ump);
	}
	freework->fw_state |= DEPCOMPLETE;
	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
	/*
	 * Zero the pointers in the saved copy.
	 */
	if (indirdep->ir_state & UFS1FMT)
		start = sizeof(ufs1_daddr_t);
	else
		start = sizeof(ufs2_daddr_t);
	start *= freework->fw_start;
	count = indirdep->ir_savebp->b_bcount - start;
	start += (uintptr_t)indirdep->ir_savebp->b_data;
	bzero((char *)start, count);
	/*
	 * We need to start the next truncation in the list if it has not
	 * been started yet.
	 */
	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
	if (fwn != NULL) {
		if (fwn->fw_freeblks == indirdep->ir_freeblks)
			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
		if ((fwn->fw_state & ONWORKLIST) == 0)
			freework_enqueue(fwn);
	}
	/*
	 * If bp is NULL the block was fully truncated; restore
	 * the saved block list. Otherwise free it if it is no
	 * longer needed.
	 */
	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
		if (bp == NULL)
			bcopy(indirdep->ir_saveddata,
			    indirdep->ir_savebp->b_data,
			    indirdep->ir_savebp->b_bcount);
		free(indirdep->ir_saveddata, M_INDIRDEP);
		indirdep->ir_saveddata = NULL;
	}
	/*
	 * When bp is NULL there is a full truncation pending. We
	 * must wait for this full truncation to be journaled before
	 * we can release this freework because the disk pointers will
	 * never be written as zero.
	 */
	if (bp == NULL) {
		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
			handle_written_freework(freework);
		else
			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
			    &freework->fw_list);
	} else {
		/* Complete when the real copy is written. */
		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
		BUF_UNLOCK(bp);
	}
}

/*
 * Calculate the number of blocks we are going to release, where datablocks
 * is the current total and length is the new file size.
 */
static ufs2_daddr_t
blkcount(fs, datablocks, length)
	struct fs *fs;
	ufs2_daddr_t datablocks;
	off_t length;
{
	off_t totblks, numblks;

	totblks = 0;
	numblks = howmany(length, fs->fs_bsize);
	if (numblks <= NDADDR) {
		totblks = howmany(length, fs->fs_fsize);
		goto out;
	}
	totblks = blkstofrags(fs, numblks);
	numblks -= NDADDR;
	/*
	 * Count all single, then double, then triple indirects required.
	 * Subtracting one indirect's worth of blocks for each pass
	 * acknowledges one of each pointed to by the inode.
	 */
	for (;;) {
		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
		numblks -= NINDIR(fs);
		if (numblks <= 0)
			break;
		numblks = howmany(numblks, NINDIR(fs));
	}
out:
	totblks = fsbtodb(fs, totblks);
	/*
	 * Handle sparse files. We can't reclaim more blocks than the inode
	 * references. We will correct it later in handle_complete_freeblks()
	 * when we know the real count.
	 */
	if (totblks > datablocks)
		return (0);
	return (datablocks - totblks);
}

/*
 * Handle freeblocks for journaled softupdate filesystems.
 *
 * Contrary to normal softupdates, we must preserve the block pointers in
 * indirects until their subordinates are free. This is to avoid journaling
 * every block that is freed, which may consume more space than the journal
 * itself. The recovery program will see the free block journals at the
 * base of the truncated area and traverse them to reclaim space. The
 * pointers in the inode may be cleared immediately after the journal
 * records are written because each direct and indirect pointer in the
 * inode is recorded in a journal. This permits full truncation to proceed
 * asynchronously. The write order is journal -> inode -> cgs -> indirects.
 *
 * The algorithm is as follows:
 * 1) Traverse the in-memory state and create journal entries to release
 *    the relevant blocks and full indirect trees.
 * 2) Traverse the indirect block chain adding partial truncation freework
 *    records to indirects in the path to lastlbn. The freework will
 *    prevent new allocation dependencies from being satisfied in this
 *    indirect until the truncation completes.
 * 3) Read and lock the inode block, performing an update with the new size
 *    and pointers. This prevents truncated data from becoming valid on
 *    disk through step 4.
 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
 *    eliminate journal work for those records that do not require it.
 * 5) Schedule the journal records to be written followed by the inode block.
 * 6) Allocate any necessary frags for the end of file.
 * 7) Zero any partially truncated blocks.
 *
 * From this point truncation proceeds asynchronously using the freework and
 * indir_trunc machinery. The file will not be extended again into a
 * partially truncated indirect block until all work is completed but
 * the normal dependency mechanism ensures that it is rolled back/forward
 * as appropriate. Further truncation may occur without delay and is
 * serialized in indir_trunc().
 */
void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;	/* The inode whose length is to be reduced */
	struct ucred *cred;
	off_t length;		/* The new length for the file */
	int flags;		/* IO_EXT and/or IO_NORMAL */
{
	struct freeblks *freeblks, *fbn;
	struct worklist *wk, *wkn;
	struct inodedep *inodedep;
	struct jblkdep *jblkdep;
	struct allocdirect *adp, *adpn;
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
	struct vnode *vp;
	struct mount *mp;
	ufs2_daddr_t extblocks, datablocks;
	ufs_lbn_t tmpval, lbn, lastlbn;
	int frags, lastoff, iboff, allocblock, needj, dflags, error, i;

	fs = ip->i_fs;
	ump = ip->i_ump;
	mp = UFSTOVFS(ump);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
	vp = ITOV(ip);
	needj = 1;
	iboff = -1;
	allocblock = 0;
	extblocks = 0;
	datablocks = 0;
	frags = 0;
	freeblks = newfreeblks(mp, ip);
	ACQUIRE_LOCK(ump);
	/*
	 * If we're truncating a removed file that will never be written
	 * we don't need to journal the block frees. The canceled journals
	 * for the allocations will suffice.
	 */
	dflags = DEPALLOC;
	if (IS_SNAPSHOT(ip))
		dflags |= NODELAY;
	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
	    length == 0)
		needj = 0;
	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
	    ip->i_number, length, needj);
	FREE_LOCK(ump);
	/*
	 * Calculate the lbn that we are truncating to. This results in -1
	 * if we're truncating to 0 bytes. So it is the last lbn we want
	 * to keep, not the first lbn we want to truncate.
	 */
	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
	lastoff = blkoff(fs, length);
	/*
	 * Compute frags we are keeping in lastlbn. 0 means all.
	 */
	if (lastlbn >= 0 && lastlbn < NDADDR) {
		frags = fragroundup(fs, lastoff);
		/* adp offset of last valid allocdirect. */
		iboff = lastlbn;
	} else if (lastlbn > 0)
		iboff = NDADDR;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
	/*
	 * Handle normal data blocks and indirects. This section saves
	 * values used after the inode update to complete frag and indirect
	 * truncation.
	 */
	if ((flags & IO_NORMAL) != 0) {
		/*
		 * Handle truncation of whole direct and indirect blocks.
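		 *
		 * The loops below walk the indirect roots using the
		 * convention that the tree rooted at inode slot i covers
		 * file lbns [lbn, lbn + tmpval) and lives at virtual lbn
		 * -lbn - i. As an illustration (assuming NDADDR == 12 and
		 * NINDIR(fs) == 4096), the single indirect covers lbns
		 * [12, 4108) at virtual lbn -12, and the double indirect
		 * covers lbns [4108, 16781324) at virtual lbn -4109.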
6429 */ 6430 for (i = iboff + 1; i < NDADDR; i++) 6431 setup_freedirect(freeblks, ip, i, needj); 6432 for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR; 6433 i++, lbn += tmpval, tmpval *= NINDIR(fs)) { 6434 /* Release a whole indirect tree. */ 6435 if (lbn > lastlbn) { 6436 setup_freeindir(freeblks, ip, i, -lbn -i, 6437 needj); 6438 continue; 6439 } 6440 iboff = i + NDADDR; 6441 /* 6442 * Traverse partially truncated indirect tree. 6443 */ 6444 if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn) 6445 setup_trunc_indir(freeblks, ip, -lbn - i, 6446 lastlbn, DIP(ip, i_ib[i])); 6447 } 6448 /* 6449 * Handle partial truncation to a frag boundary. 6450 */ 6451 if (frags) { 6452 ufs2_daddr_t blkno; 6453 long oldfrags; 6454 6455 oldfrags = blksize(fs, ip, lastlbn); 6456 blkno = DIP(ip, i_db[lastlbn]); 6457 if (blkno && oldfrags != frags) { 6458 oldfrags -= frags; 6459 oldfrags = numfrags(ip->i_fs, oldfrags); 6460 blkno += numfrags(ip->i_fs, frags); 6461 newfreework(ump, freeblks, NULL, lastlbn, 6462 blkno, oldfrags, 0, needj); 6463 if (needj) 6464 adjust_newfreework(freeblks, 6465 numfrags(ip->i_fs, frags)); 6466 } else if (blkno == 0) 6467 allocblock = 1; 6468 } 6469 /* 6470 * Add a journal record for partial truncate if we are 6471 * handling indirect blocks. Non-indirects need no extra 6472 * journaling. 6473 */ 6474 if (length != 0 && lastlbn >= NDADDR) { 6475 ip->i_flag |= IN_TRUNCATED; 6476 newjtrunc(freeblks, length, 0); 6477 } 6478 ip->i_size = length; 6479 DIP_SET(ip, i_size, ip->i_size); 6480 datablocks = DIP(ip, i_blocks) - extblocks; 6481 if (length != 0) 6482 datablocks = blkcount(ip->i_fs, datablocks, length); 6483 freeblks->fb_len = length; 6484 } 6485 if ((flags & IO_EXT) != 0) { 6486 for (i = 0; i < NXADDR; i++) 6487 setup_freeext(freeblks, ip, i, needj); 6488 ip->i_din2->di_extsize = 0; 6489 datablocks += extblocks; 6490 } 6491#ifdef QUOTA 6492 /* Reference the quotas in case the block count is wrong in the end. */ 6493 quotaref(vp, freeblks->fb_quota); 6494 (void) chkdq(ip, -datablocks, NOCRED, 0); 6495#endif 6496 freeblks->fb_chkcnt = -datablocks; 6497 UFS_LOCK(ump); 6498 fs->fs_pendingblocks += datablocks; 6499 UFS_UNLOCK(ump); 6500 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks); 6501 /* 6502 * Handle truncation of incomplete alloc direct dependencies. We 6503 * hold the inode block locked to prevent incomplete dependencies 6504 * from reaching the disk while we are eliminating those that 6505 * have been truncated. This is a partially inlined ffs_update(). 6506 */ 6507 ufs_itimes(vp); 6508 ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED); 6509 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 6510 (int)fs->fs_bsize, cred, &bp); 6511 if (error) { 6512 brelse(bp); 6513 softdep_error("softdep_journal_freeblocks", error); 6514 return; 6515 } 6516 if (bp->b_bufsize == fs->fs_bsize) 6517 bp->b_flags |= B_CLUSTEROK; 6518 softdep_update_inodeblock(ip, bp, 0); 6519 if (ump->um_fstype == UFS1) 6520 *((struct ufs1_dinode *)bp->b_data + 6521 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1; 6522 else 6523 *((struct ufs2_dinode *)bp->b_data + 6524 ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2; 6525 ACQUIRE_LOCK(ump); 6526 (void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6527 if ((inodedep->id_state & IOSTARTED) != 0) 6528 panic("softdep_setup_freeblocks: inode busy"); 6529 /* 6530 * Add the freeblks structure to the list of operations that 6531 * must await the zero'ed inode being written to disk. 
If we 6532 * still have a bitmap dependency (needj), then the inode 6533 * has never been written to disk, so we can process the 6534 * freeblks below once we have deleted the dependencies. 6535 */ 6536 if (needj) 6537 WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list); 6538 else 6539 freeblks->fb_state |= COMPLETE; 6540 if ((flags & IO_NORMAL) != 0) { 6541 TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) { 6542 if (adp->ad_offset > iboff) 6543 cancel_allocdirect(&inodedep->id_inoupdt, adp, 6544 freeblks); 6545 /* 6546 * Truncate the allocdirect. We could eliminate 6547 * or modify journal records as well. 6548 */ 6549 else if (adp->ad_offset == iboff && frags) 6550 adp->ad_newsize = frags; 6551 } 6552 } 6553 if ((flags & IO_EXT) != 0) 6554 while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0) 6555 cancel_allocdirect(&inodedep->id_extupdt, adp, 6556 freeblks); 6557 /* 6558 * Scan the bufwait list for newblock dependencies that will never 6559 * make it to disk. 6560 */ 6561 LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) { 6562 if (wk->wk_type != D_ALLOCDIRECT) 6563 continue; 6564 adp = WK_ALLOCDIRECT(wk); 6565 if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) || 6566 ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) { 6567 cancel_jfreeblk(freeblks, adp->ad_newblkno); 6568 cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork); 6569 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 6570 } 6571 } 6572 /* 6573 * Add journal work. 6574 */ 6575 LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) 6576 add_to_journal(&jblkdep->jb_list); 6577 FREE_LOCK(ump); 6578 bdwrite(bp); 6579 /* 6580 * Truncate dependency structures beyond length. 6581 */ 6582 trunc_dependencies(ip, freeblks, lastlbn, frags, flags); 6583 /* 6584 * This is only set when we need to allocate a fragment because 6585 * none existed at the end of a frag-sized file. It handles only 6586 * allocating a new, zero filled block. 6587 */ 6588 if (allocblock) { 6589 ip->i_size = length - lastoff; 6590 DIP_SET(ip, i_size, ip->i_size); 6591 error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp); 6592 if (error != 0) { 6593 softdep_error("softdep_journal_freeblks", error); 6594 return; 6595 } 6596 ip->i_size = length; 6597 DIP_SET(ip, i_size, length); 6598 ip->i_flag |= IN_CHANGE | IN_UPDATE; 6599 allocbuf(bp, frags); 6600 ffs_update(vp, 0); 6601 bawrite(bp); 6602 } else if (lastoff != 0 && vp->v_type != VDIR) { 6603 int size; 6604 6605 /* 6606 * Zero the end of a truncated frag or block. 6607 */ 6608 size = sblksize(fs, length, lastlbn); 6609 error = bread(vp, lastlbn, size, cred, &bp); 6610 if (error) { 6611 softdep_error("softdep_journal_freeblks", error); 6612 return; 6613 } 6614 bzero((char *)bp->b_data + lastoff, size - lastoff); 6615 bawrite(bp); 6616 6617 } 6618 ACQUIRE_LOCK(ump); 6619 inodedep_lookup(mp, ip->i_number, dflags, &inodedep); 6620 TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next); 6621 freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST; 6622 /* 6623 * We zero earlier truncations so they don't erroneously 6624 * update i_blocks. 
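	 *
	 * For instance (an assumed sequence): if a truncate to 8192
	 * bytes is still pending when a truncate to length 0 arrives,
	 * the earlier freeblks' fb_len is cleared in the loop below so
	 * that only this final, full truncation adjusts the block count.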
6625 */ 6626 if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0) 6627 TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next) 6628 fbn->fb_len = 0; 6629 if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE && 6630 LIST_EMPTY(&freeblks->fb_jblkdephd)) 6631 freeblks->fb_state |= INPROGRESS; 6632 else 6633 freeblks = NULL; 6634 FREE_LOCK(ump); 6635 if (freeblks) 6636 handle_workitem_freeblocks(freeblks, 0); 6637 trunc_pages(ip, length, extblocks, flags); 6638 6639} 6640 6641/* 6642 * Flush a JOP_SYNC to the journal. 6643 */ 6644void 6645softdep_journal_fsync(ip) 6646 struct inode *ip; 6647{ 6648 struct jfsync *jfsync; 6649 6650 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 6651 ("softdep_journal_fsync called on non-softdep filesystem")); 6652 if ((ip->i_flag & IN_TRUNCATED) == 0) 6653 return; 6654 ip->i_flag &= ~IN_TRUNCATED; 6655 jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO); 6656 workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump)); 6657 jfsync->jfs_size = ip->i_size; 6658 jfsync->jfs_ino = ip->i_number; 6659 ACQUIRE_LOCK(ip->i_ump); 6660 add_to_journal(&jfsync->jfs_list); 6661 jwait(&jfsync->jfs_list, MNT_WAIT); 6662 FREE_LOCK(ip->i_ump); 6663} 6664 6665/* 6666 * Block de-allocation dependencies. 6667 * 6668 * When blocks are de-allocated, the on-disk pointers must be nullified before 6669 * the blocks are made available for use by other files. (The true 6670 * requirement is that old pointers must be nullified before new on-disk 6671 * pointers are set. We chose this slightly more stringent requirement to 6672 * reduce complexity.) Our implementation handles this dependency by updating 6673 * the inode (or indirect block) appropriately but delaying the actual block 6674 * de-allocation (i.e., freemap and free space count manipulation) until 6675 * after the updated versions reach stable storage. After the disk is 6676 * updated, the blocks can be safely de-allocated whenever it is convenient. 6677 * This implementation handles only the common case of reducing a file's 6678 * length to zero. Other cases are handled by the conventional synchronous 6679 * write approach. 6680 * 6681 * The ffs implementation with which we worked double-checks 6682 * the state of the block pointers and file size as it reduces 6683 * a file's length. Some of this code is replicated here in our 6684 * soft updates implementation. The freeblks->fb_chkcnt field is 6685 * used to transfer a part of this information to the procedure 6686 * that eventually de-allocates the blocks. 6687 * 6688 * This routine should be called from the routine that shortens 6689 * a file's length, before the inode's size or block pointers 6690 * are modified. It will save the block pointer information for 6691 * later release and zero the inode so that the calling routine 6692 * can release it. 
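 *
 * A sketch of the expected use (simplified and illustrative; see
 * ffs_truncate() for the real caller): when a file on a softdep
 * mount is being truncated to length zero, the truncation code calls
 *
 *	softdep_setup_freeblocks(ip, (off_t)0, IO_NORMAL | IO_EXT);
 *
 * before it releases the inode, and the saved pointers live on in a
 * freeblks workitem until the zero'ed on-disk inode is written.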
 */
void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;	/* The inode whose length is to be reduced */
	off_t length;		/* The new length for the file */
	int flags;		/* IO_EXT and/or IO_NORMAL */
{
	struct ufs1_dinode *dp1;
	struct ufs2_dinode *dp2;
	struct freeblks *freeblks;
	struct inodedep *inodedep;
	struct allocdirect *adp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	ufs2_daddr_t extblocks, datablocks;
	struct mount *mp;
	int i, delay, error, dflags;
	ufs_lbn_t tmpval;
	ufs_lbn_t lbn;

	ump = ip->i_ump;
	mp = UFSTOVFS(ump);
	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
	    ("softdep_setup_freeblocks called on non-softdep filesystem"));
	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
	    ip->i_number, length);
	KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
	fs = ip->i_fs;
	freeblks = newfreeblks(mp, ip);
	extblocks = 0;
	datablocks = 0;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
	if ((flags & IO_NORMAL) != 0) {
		for (i = 0; i < NDADDR; i++)
			setup_freedirect(freeblks, ip, i, 0);
		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
			setup_freeindir(freeblks, ip, i, -lbn -i, 0);
		ip->i_size = 0;
		DIP_SET(ip, i_size, 0);
		datablocks = DIP(ip, i_blocks) - extblocks;
	}
	if ((flags & IO_EXT) != 0) {
		for (i = 0; i < NXADDR; i++)
			setup_freeext(freeblks, ip, i, 0);
		ip->i_din2->di_extsize = 0;
		datablocks += extblocks;
	}
#ifdef QUOTA
	/* Reference the quotas in case the block count is wrong in the end. */
	quotaref(ITOV(ip), freeblks->fb_quota);
	(void) chkdq(ip, -datablocks, NOCRED, 0);
#endif
	freeblks->fb_chkcnt = -datablocks;
	UFS_LOCK(ump);
	fs->fs_pendingblocks += datablocks;
	UFS_UNLOCK(ump);
	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
	/*
	 * Push the zero'ed inode to its disk buffer so that we are free
	 * to delete its dependencies below. Once the dependencies are gone
	 * the buffer can be safely released.
	 */
	if ((error = bread(ip->i_devvp,
	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
		brelse(bp);
		softdep_error("softdep_setup_freeblocks", error);
	}
	if (ump->um_fstype == UFS1) {
		dp1 = ((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		ip->i_din1->di_freelink = dp1->di_freelink;
		*dp1 = *ip->i_din1;
	} else {
		dp2 = ((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		ip->i_din2->di_freelink = dp2->di_freelink;
		*dp2 = *ip->i_din2;
	}
	/*
	 * Find and eliminate any inode dependencies.
	 */
	ACQUIRE_LOCK(ump);
	dflags = DEPALLOC;
	if (IS_SNAPSHOT(ip))
		dflags |= NODELAY;
	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
	if ((inodedep->id_state & IOSTARTED) != 0)
		panic("softdep_setup_freeblocks: inode busy");
	/*
	 * Add the freeblks structure to the list of operations that
	 * must await the zero'ed inode being written to disk. If we
	 * still have a bitmap dependency (delay == 0), then the inode
	 * has never been written to disk, so we can process the
	 * freeblks below once we have deleted the dependencies.
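	 *
	 * As a concrete illustration (not an additional case): an inode
	 * that was allocated and truncated before its cylinder group
	 * bitmap ever reached the disk still carries its bitmap
	 * dependency, so delay is 0 below and the freeblks need not wait
	 * on an inode write that will never describe these blocks.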
	 */
	delay = (inodedep->id_state & DEPCOMPLETE);
	if (delay)
		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
	else
		freeblks->fb_state |= COMPLETE;
	/*
	 * Because the file length has been truncated to zero, any
	 * pending block allocation dependency structures associated
	 * with this inode are obsolete and can simply be de-allocated.
	 * We must first merge the two dependency lists to get rid of
	 * any duplicate freefrag structures, then purge the merged list.
	 * If we still have a bitmap dependency, then the inode has never
	 * been written to disk, so we can free any fragments without delay.
	 */
	if (flags & IO_NORMAL) {
		merge_inode_lists(&inodedep->id_newinoupdt,
		    &inodedep->id_inoupdt);
		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
			cancel_allocdirect(&inodedep->id_inoupdt, adp,
			    freeblks);
	}
	if (flags & IO_EXT) {
		merge_inode_lists(&inodedep->id_newextupdt,
		    &inodedep->id_extupdt);
		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
			cancel_allocdirect(&inodedep->id_extupdt, adp,
			    freeblks);
	}
	FREE_LOCK(ump);
	bdwrite(bp);
	trunc_dependencies(ip, freeblks, -1, 0, flags);
	ACQUIRE_LOCK(ump);
	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
		(void) free_inodedep(inodedep);
	freeblks->fb_state |= DEPCOMPLETE;
	/*
	 * If the inode with zeroed block pointers is now on disk
	 * we can start freeing blocks.
	 */
	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
		freeblks->fb_state |= INPROGRESS;
	else
		freeblks = NULL;
	FREE_LOCK(ump);
	if (freeblks)
		handle_workitem_freeblocks(freeblks, 0);
	trunc_pages(ip, length, extblocks, flags);
}

/*
 * Eliminate pages from the page cache that back parts of this inode and
 * adjust the vnode pager's idea of our size. This prevents stale data
 * from hanging around in the page cache.
 */
static void
trunc_pages(ip, length, extblocks, flags)
	struct inode *ip;
	off_t length;
	ufs2_daddr_t extblocks;
	int flags;
{
	struct vnode *vp;
	struct fs *fs;
	ufs_lbn_t lbn;
	off_t end, extend;

	vp = ITOV(ip);
	fs = ip->i_fs;
	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
	if ((flags & IO_EXT) != 0)
		vn_pages_remove(vp, extend, 0);
	if ((flags & IO_NORMAL) == 0)
		return;
	BO_LOCK(&vp->v_bufobj);
	drain_output(vp);
	BO_UNLOCK(&vp->v_bufobj);
	/*
	 * The vnode pager eliminates file pages; we eliminate indirects
	 * below.
	 */
	vnode_pager_setsize(vp, length);
	/*
	 * Calculate the end based on the last indirect we want to keep. If
	 * the block extends into indirects we can just use the negative of
	 * its lbn. Doubles and triples exist at lower numbers so we must
	 * be careful not to remove those, if they exist. Double and triple
	 * indirect lbns do not overlap with others so it is not important
	 * to verify how many levels are required.
	 */
	lbn = lblkno(fs, length);
	if (lbn >= NDADDR) {
		/* Calculate the virtual lbn of the triple indirect. */
		lbn = -lbn - (NIADDR - 1);
		end = OFF_TO_IDX(lblktosize(fs, lbn));
	} else
		end = extend;
	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
}

/*
 * See if the buf bp is in the range eliminated by truncation.
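 *
 * For example (hypothetical partial truncate with lastlbn == 100 and
 * lastoff == 1232): a data buffer at lbn 50 is kept (return 0), the
 * buffer at lbn 100 is kept but trimmed by returning 1 with *blkoffp
 * set to 1232, and a buffer at lbn 150 is eliminated outright
 * (return 1 with *blkoffp left 0).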
6893 */ 6894static int 6895trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags) 6896 struct buf *bp; 6897 int *blkoffp; 6898 ufs_lbn_t lastlbn; 6899 int lastoff; 6900 int flags; 6901{ 6902 ufs_lbn_t lbn; 6903 6904 *blkoffp = 0; 6905 /* Only match ext/normal blocks as appropriate. */ 6906 if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) || 6907 ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0)) 6908 return (0); 6909 /* ALTDATA is always a full truncation. */ 6910 if ((bp->b_xflags & BX_ALTDATA) != 0) 6911 return (1); 6912 /* -1 is full truncation. */ 6913 if (lastlbn == -1) 6914 return (1); 6915 /* 6916 * If this is a partial truncate we only want those 6917 * blocks and indirect blocks that cover the range 6918 * we're after. 6919 */ 6920 lbn = bp->b_lblkno; 6921 if (lbn < 0) 6922 lbn = -(lbn + lbn_level(lbn)); 6923 if (lbn < lastlbn) 6924 return (0); 6925 /* Here we only truncate lblkno if it's partial. */ 6926 if (lbn == lastlbn) { 6927 if (lastoff == 0) 6928 return (0); 6929 *blkoffp = lastoff; 6930 } 6931 return (1); 6932} 6933 6934/* 6935 * Eliminate any dependencies that exist in memory beyond lblkno:off 6936 */ 6937static void 6938trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags) 6939 struct inode *ip; 6940 struct freeblks *freeblks; 6941 ufs_lbn_t lastlbn; 6942 int lastoff; 6943 int flags; 6944{ 6945 struct bufobj *bo; 6946 struct vnode *vp; 6947 struct buf *bp; 6948 struct fs *fs; 6949 int blkoff; 6950 6951 /* 6952 * We must wait for any I/O in progress to finish so that 6953 * all potential buffers on the dirty list will be visible. 6954 * Once they are all there, walk the list and get rid of 6955 * any dependencies. 6956 */ 6957 fs = ip->i_fs; 6958 vp = ITOV(ip); 6959 bo = &vp->v_bufobj; 6960 BO_LOCK(bo); 6961 drain_output(vp); 6962 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) 6963 bp->b_vflags &= ~BV_SCANNED; 6964restart: 6965 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { 6966 if (bp->b_vflags & BV_SCANNED) 6967 continue; 6968 if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) { 6969 bp->b_vflags |= BV_SCANNED; 6970 continue; 6971 } 6972 KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer")); 6973 if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL) 6974 goto restart; 6975 BO_UNLOCK(bo); 6976 if (deallocate_dependencies(bp, freeblks, blkoff)) 6977 bqrelse(bp); 6978 else 6979 brelse(bp); 6980 BO_LOCK(bo); 6981 goto restart; 6982 } 6983 /* 6984 * Now do the work of vtruncbuf while also matching indirect blocks. 
	 */
	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
		bp->b_vflags &= ~BV_SCANNED;
cleanrestart:
	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
		if (bp->b_vflags & BV_SCANNED)
			continue;
		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
			bp->b_vflags |= BV_SCANNED;
			continue;
		}
		if (BUF_LOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
		    BO_LOCKPTR(bo)) == ENOLCK) {
			BO_LOCK(bo);
			goto cleanrestart;
		}
		bp->b_vflags |= BV_SCANNED;
		bremfree(bp);
		if (blkoff != 0) {
			allocbuf(bp, blkoff);
			bqrelse(bp);
		} else {
			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
			brelse(bp);
		}
		BO_LOCK(bo);
		goto cleanrestart;
	}
	drain_output(vp);
	BO_UNLOCK(bo);
}

static int
cancel_pagedep(pagedep, freeblks, blkoff)
	struct pagedep *pagedep;
	struct freeblks *freeblks;
	int blkoff;
{
	struct jremref *jremref;
	struct jmvref *jmvref;
	struct dirrem *dirrem, *tmp;
	int i;

	/*
	 * Copy any directory remove dependencies to the list
	 * to be processed after the freeblks proceeds. If the
	 * directory entries never made it to disk they
	 * can be dumped directly onto the work list.
	 */
	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
		/* Skip this directory removal if it is intended to remain. */
		if (dirrem->dm_offset < blkoff)
			continue;
		/*
		 * If there are any dirrems we wait for the journal write
		 * to complete and then restart the buf scan as the lock
		 * has been dropped.
		 */
		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
			jwait(&jremref->jr_list, MNT_WAIT);
			return (ERESTART);
		}
		LIST_REMOVE(dirrem, dm_next);
		dirrem->dm_dirinum = pagedep->pd_ino;
		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
	}
	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
		jwait(&jmvref->jm_list, MNT_WAIT);
		return (ERESTART);
	}
	/*
	 * When we're partially truncating a pagedep we just want to flush
	 * journal entries and return. There cannot be any adds in the
	 * truncated portion of the directory and newblk must remain if
	 * part of the block remains.
	 */
	if (blkoff != 0) {
		struct diradd *dap;

		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
			if (dap->da_offset > blkoff)
				panic("cancel_pagedep: diradd %p off %d > %d",
				    dap, dap->da_offset, blkoff);
		for (i = 0; i < DAHASHSZ; i++)
			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
				if (dap->da_offset > blkoff)
					panic("cancel_pagedep: diradd %p off %d > %d",
					    dap, dap->da_offset, blkoff);
		return (0);
	}
	/*
	 * There should be no directory add dependencies present
	 * as the directory could not be truncated until all
	 * children were removed.
7080 */ 7081 KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL, 7082 ("deallocate_dependencies: pendinghd != NULL")); 7083 for (i = 0; i < DAHASHSZ; i++) 7084 KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL, 7085 ("deallocate_dependencies: diraddhd != NULL")); 7086 if ((pagedep->pd_state & NEWBLOCK) != 0) 7087 free_newdirblk(pagedep->pd_newdirblk); 7088 if (free_pagedep(pagedep) == 0) 7089 panic("Failed to free pagedep %p", pagedep); 7090 return (0); 7091} 7092 7093/* 7094 * Reclaim any dependency structures from a buffer that is about to 7095 * be reallocated to a new vnode. The buffer must be locked, thus, 7096 * no I/O completion operations can occur while we are manipulating 7097 * its associated dependencies. The mutex is held so that other I/O's 7098 * associated with related dependencies do not occur. 7099 */ 7100static int 7101deallocate_dependencies(bp, freeblks, off) 7102 struct buf *bp; 7103 struct freeblks *freeblks; 7104 int off; 7105{ 7106 struct indirdep *indirdep; 7107 struct pagedep *pagedep; 7108 struct allocdirect *adp; 7109 struct worklist *wk, *wkn; 7110 struct ufsmount *ump; 7111 7112 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 7113 goto done; 7114 ump = VFSTOUFS(wk->wk_mp); 7115 ACQUIRE_LOCK(ump); 7116 LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) { 7117 switch (wk->wk_type) { 7118 case D_INDIRDEP: 7119 indirdep = WK_INDIRDEP(wk); 7120 if (bp->b_lblkno >= 0 || 7121 bp->b_blkno != indirdep->ir_savebp->b_lblkno) 7122 panic("deallocate_dependencies: not indir"); 7123 cancel_indirdep(indirdep, bp, freeblks); 7124 continue; 7125 7126 case D_PAGEDEP: 7127 pagedep = WK_PAGEDEP(wk); 7128 if (cancel_pagedep(pagedep, freeblks, off)) { 7129 FREE_LOCK(ump); 7130 return (ERESTART); 7131 } 7132 continue; 7133 7134 case D_ALLOCINDIR: 7135 /* 7136 * Simply remove the allocindir, we'll find it via 7137 * the indirdep where we can clear pointers if 7138 * needed. 7139 */ 7140 WORKLIST_REMOVE(wk); 7141 continue; 7142 7143 case D_FREEWORK: 7144 /* 7145 * A truncation is waiting for the zero'd pointers 7146 * to be written. It can be freed when the freeblks 7147 * is journaled. 7148 */ 7149 WORKLIST_REMOVE(wk); 7150 wk->wk_state |= ONDEPLIST; 7151 WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk); 7152 break; 7153 7154 case D_ALLOCDIRECT: 7155 adp = WK_ALLOCDIRECT(wk); 7156 if (off != 0) 7157 continue; 7158 /* FALLTHROUGH */ 7159 default: 7160 panic("deallocate_dependencies: Unexpected type %s", 7161 TYPENAME(wk->wk_type)); 7162 /* NOTREACHED */ 7163 } 7164 } 7165 FREE_LOCK(ump); 7166done: 7167 /* 7168 * Don't throw away this buf, we were partially truncating and 7169 * some deps may always remain. 7170 */ 7171 if (off) { 7172 allocbuf(bp, off); 7173 bp->b_vflags |= BV_SCANNED; 7174 return (EBUSY); 7175 } 7176 bp->b_flags |= B_INVAL | B_NOCACHE; 7177 7178 return (0); 7179} 7180 7181/* 7182 * An allocdirect is being canceled due to a truncate. We must make sure 7183 * the journal entry is released in concert with the blkfree that releases 7184 * the storage. Completed journal entries must not be released until the 7185 * space is no longer pointed to by the inode or in the bitmap. 
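 *
 * In rough summary (restating the two cases handled below): a JNEWBLK
 * that is still unwritten is attached to the freework so it can be
 * passed to ffs_blkfree() and retired together with the bitmap
 * update, while a journal entry that has already been written leaves
 * only its jsegdep behind on fb_jwork until the freeblks completes.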
7186 */ 7187static void 7188cancel_allocdirect(adphead, adp, freeblks) 7189 struct allocdirectlst *adphead; 7190 struct allocdirect *adp; 7191 struct freeblks *freeblks; 7192{ 7193 struct freework *freework; 7194 struct newblk *newblk; 7195 struct worklist *wk; 7196 7197 TAILQ_REMOVE(adphead, adp, ad_next); 7198 newblk = (struct newblk *)adp; 7199 freework = NULL; 7200 /* 7201 * Find the correct freework structure. 7202 */ 7203 LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) { 7204 if (wk->wk_type != D_FREEWORK) 7205 continue; 7206 freework = WK_FREEWORK(wk); 7207 if (freework->fw_blkno == newblk->nb_newblkno) 7208 break; 7209 } 7210 if (freework == NULL) 7211 panic("cancel_allocdirect: Freework not found"); 7212 /* 7213 * If a newblk exists at all we still have the journal entry that 7214 * initiated the allocation so we do not need to journal the free. 7215 */ 7216 cancel_jfreeblk(freeblks, freework->fw_blkno); 7217 /* 7218 * If the journal hasn't been written the jnewblk must be passed 7219 * to the call to ffs_blkfree that reclaims the space. We accomplish 7220 * this by linking the journal dependency into the freework to be 7221 * freed when freework_freeblock() is called. If the journal has 7222 * been written we can simply reclaim the journal space when the 7223 * freeblks work is complete. 7224 */ 7225 freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list, 7226 &freeblks->fb_jwork); 7227 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 7228} 7229 7230 7231/* 7232 * Cancel a new block allocation. May be an indirect or direct block. We 7233 * remove it from various lists and return any journal record that needs to 7234 * be resolved by the caller. 7235 * 7236 * A special consideration is made for indirects which were never pointed 7237 * at on disk and will never be found once this block is released. 7238 */ 7239static struct jnewblk * 7240cancel_newblk(newblk, wk, wkhd) 7241 struct newblk *newblk; 7242 struct worklist *wk; 7243 struct workhead *wkhd; 7244{ 7245 struct jnewblk *jnewblk; 7246 7247 CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno); 7248 7249 newblk->nb_state |= GOINGAWAY; 7250 /* 7251 * Previously we traversed the completedhd on each indirdep 7252 * attached to this newblk to cancel them and gather journal 7253 * work. Since we need only the oldest journal segment and 7254 * the lowest point on the tree will always have the oldest 7255 * journal segment we are free to release the segments 7256 * of any subordinates and may leave the indirdep list to 7257 * indirdep_complete() when this newblk is freed. 7258 */ 7259 if (newblk->nb_state & ONDEPLIST) { 7260 newblk->nb_state &= ~ONDEPLIST; 7261 LIST_REMOVE(newblk, nb_deps); 7262 } 7263 if (newblk->nb_state & ONWORKLIST) 7264 WORKLIST_REMOVE(&newblk->nb_list); 7265 /* 7266 * If the journal entry hasn't been written we save a pointer to 7267 * the dependency that frees it until it is written or the 7268 * superseding operation completes. 7269 */ 7270 jnewblk = newblk->nb_jnewblk; 7271 if (jnewblk != NULL && wk != NULL) { 7272 newblk->nb_jnewblk = NULL; 7273 jnewblk->jn_dep = wk; 7274 } 7275 if (!LIST_EMPTY(&newblk->nb_jwork)) 7276 jwork_move(wkhd, &newblk->nb_jwork); 7277 /* 7278 * When truncating we must free the newdirblk early to remove 7279 * the pagedep from the hash before returning. 
7280 */ 7281 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7282 free_newdirblk(WK_NEWDIRBLK(wk)); 7283 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7284 panic("cancel_newblk: extra newdirblk"); 7285 7286 return (jnewblk); 7287} 7288 7289/* 7290 * Schedule the freefrag associated with a newblk to be released once 7291 * the pointers are written and the previous block is no longer needed. 7292 */ 7293static void 7294newblk_freefrag(newblk) 7295 struct newblk *newblk; 7296{ 7297 struct freefrag *freefrag; 7298 7299 if (newblk->nb_freefrag == NULL) 7300 return; 7301 freefrag = newblk->nb_freefrag; 7302 newblk->nb_freefrag = NULL; 7303 freefrag->ff_state |= COMPLETE; 7304 if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE) 7305 add_to_worklist(&freefrag->ff_list, 0); 7306} 7307 7308/* 7309 * Free a newblk. Generate a new freefrag work request if appropriate. 7310 * This must be called after the inode pointer and any direct block pointers 7311 * are valid or fully removed via truncate or frag extension. 7312 */ 7313static void 7314free_newblk(newblk) 7315 struct newblk *newblk; 7316{ 7317 struct indirdep *indirdep; 7318 struct worklist *wk; 7319 7320 KASSERT(newblk->nb_jnewblk == NULL, 7321 ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk)); 7322 KASSERT(newblk->nb_list.wk_type != D_NEWBLK, 7323 ("free_newblk: unclaimed newblk")); 7324 LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp)); 7325 newblk_freefrag(newblk); 7326 if (newblk->nb_state & ONDEPLIST) 7327 LIST_REMOVE(newblk, nb_deps); 7328 if (newblk->nb_state & ONWORKLIST) 7329 WORKLIST_REMOVE(&newblk->nb_list); 7330 LIST_REMOVE(newblk, nb_hash); 7331 if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL) 7332 free_newdirblk(WK_NEWDIRBLK(wk)); 7333 if (!LIST_EMPTY(&newblk->nb_newdirblk)) 7334 panic("free_newblk: extra newdirblk"); 7335 while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL) 7336 indirdep_complete(indirdep); 7337 handle_jwork(&newblk->nb_jwork); 7338 WORKITEM_FREE(newblk, D_NEWBLK); 7339} 7340 7341/* 7342 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep. 7343 * This routine must be called with splbio interrupts blocked. 7344 */ 7345static void 7346free_newdirblk(newdirblk) 7347 struct newdirblk *newdirblk; 7348{ 7349 struct pagedep *pagedep; 7350 struct diradd *dap; 7351 struct worklist *wk; 7352 7353 LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp)); 7354 WORKLIST_REMOVE(&newdirblk->db_list); 7355 /* 7356 * If the pagedep is still linked onto the directory buffer 7357 * dependency chain, then some of the entries on the 7358 * pd_pendinghd list may not be committed to disk yet. In 7359 * this case, we will simply clear the NEWBLOCK flag and 7360 * let the pd_pendinghd list be processed when the pagedep 7361 * is next written. If the pagedep is no longer on the buffer 7362 * dependency chain, then all the entries on the pd_pending 7363 * list are committed to disk and we can free them here. 7364 */ 7365 pagedep = newdirblk->db_pagedep; 7366 pagedep->pd_state &= ~NEWBLOCK; 7367 if ((pagedep->pd_state & ONWORKLIST) == 0) { 7368 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 7369 free_diradd(dap, NULL); 7370 /* 7371 * If no dependencies remain, the pagedep will be freed. 7372 */ 7373 free_pagedep(pagedep); 7374 } 7375 /* Should only ever be one item in the list. 
 */
	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
		WORKLIST_REMOVE(wk);
		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
	}
	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
}

/*
 * Prepare an inode to be freed. The actual free operation is not
 * done until the zero'ed inode has been written to disk.
 */
void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{
	struct inode *ip = VTOI(pvp);
	struct inodedep *inodedep;
	struct freefile *freefile;
	struct freeblks *freeblks;
	struct ufsmount *ump;

	ump = ip->i_ump;
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_freefile called on non-softdep filesystem"));
	/*
	 * This sets up the inode de-allocation dependency.
	 */
	freefile = malloc(sizeof(struct freefile),
		M_FREEFILE, M_SOFTDEP_FLAGS);
	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
	freefile->fx_mode = mode;
	freefile->fx_oldinum = ino;
	freefile->fx_devvp = ip->i_devvp;
	LIST_INIT(&freefile->fx_jwork);
	UFS_LOCK(ump);
	ip->i_fs->fs_pendinginodes += 1;
	UFS_UNLOCK(ump);

	/*
	 * If the inodedep does not exist, then the zero'ed inode has
	 * been written to disk. If the allocated inode has never been
	 * written to disk, then the on-disk inode is zero'ed. In either
	 * case we can free the file immediately. If the journal was
	 * canceled before being written the inode will never make it to
	 * disk and we must send the canceled journal entries to
	 * ffs_freefile() to be cleared in conjunction with the bitmap.
	 * Any blocks waiting on the inode to write can be safely freed
	 * here as it will never be written.
	 */
	ACQUIRE_LOCK(ump);
	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
	if (inodedep) {
		/*
		 * Clear out freeblks that no longer need to reference
		 * this inode.
		 */
		while ((freeblks =
		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
			    fb_next);
			freeblks->fb_state &= ~ONDEPLIST;
		}
		/*
		 * Remove this inode from the unlinked list.
		 */
		if (inodedep->id_state & UNLINKED) {
			/*
			 * Save the journal work to be freed with the bitmap
			 * before we clear UNLINKED. Otherwise it can be lost
			 * if the inode block is written.
			 */
			handle_bufwait(inodedep, &freefile->fx_jwork);
			clear_unlinked_inodedep(inodedep);
			/*
			 * Re-acquire inodedep as we've dropped the
			 * soft updates lock in clear_unlinked_inodedep().
			 */
			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
		}
	}
	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
		FREE_LOCK(ump);
		handle_workitem_freefile(freefile);
		return;
	}
	if ((inodedep->id_state & DEPCOMPLETE) == 0)
		inodedep->id_state |= GOINGAWAY;
	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
	FREE_LOCK(ump);
	if (ip->i_number == ino)
		ip->i_flag |= IN_MODIFIED;
}

/*
 * Check to see if an inode has never been written to disk. If
 * so free the inodedep and return success, otherwise return failure.
 * This routine must be called with splbio interrupts blocked.
 *
 * If we still have a bitmap dependency, then the inode has never
 * been written to disk. Drop the dependency as it is no longer
 * necessary since the inode is being deallocated.
We set the 7479 * ALLCOMPLETE flags since the bitmap now properly shows that the 7480 * inode is not allocated. Even if the inode is actively being 7481 * written, it has been rolled back to its zero'ed state, so we 7482 * are ensured that a zero inode is what is on the disk. For short 7483 * lived files, this change will usually result in removing all the 7484 * dependencies from the inode so that it can be freed immediately. 7485 */ 7486static int 7487check_inode_unwritten(inodedep) 7488 struct inodedep *inodedep; 7489{ 7490 7491 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7492 7493 if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 || 7494 !LIST_EMPTY(&inodedep->id_dirremhd) || 7495 !LIST_EMPTY(&inodedep->id_pendinghd) || 7496 !LIST_EMPTY(&inodedep->id_bufwait) || 7497 !LIST_EMPTY(&inodedep->id_inowait) || 7498 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7499 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7500 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7501 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7502 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7503 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7504 inodedep->id_mkdiradd != NULL || 7505 inodedep->id_nlinkdelta != 0) 7506 return (0); 7507 /* 7508 * Another process might be in initiate_write_inodeblock_ufs[12] 7509 * trying to allocate memory without holding "Softdep Lock". 7510 */ 7511 if ((inodedep->id_state & IOSTARTED) != 0 && 7512 inodedep->id_savedino1 == NULL) 7513 return (0); 7514 7515 if (inodedep->id_state & ONDEPLIST) 7516 LIST_REMOVE(inodedep, id_deps); 7517 inodedep->id_state &= ~ONDEPLIST; 7518 inodedep->id_state |= ALLCOMPLETE; 7519 inodedep->id_bmsafemap = NULL; 7520 if (inodedep->id_state & ONWORKLIST) 7521 WORKLIST_REMOVE(&inodedep->id_list); 7522 if (inodedep->id_savedino1 != NULL) { 7523 free(inodedep->id_savedino1, M_SAVEDINO); 7524 inodedep->id_savedino1 = NULL; 7525 } 7526 if (free_inodedep(inodedep) == 0) 7527 panic("check_inode_unwritten: busy inode"); 7528 return (1); 7529} 7530 7531/* 7532 * Try to free an inodedep structure. Return 1 if it could be freed. 7533 */ 7534static int 7535free_inodedep(inodedep) 7536 struct inodedep *inodedep; 7537{ 7538 7539 LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp)); 7540 if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 || 7541 (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE || 7542 !LIST_EMPTY(&inodedep->id_dirremhd) || 7543 !LIST_EMPTY(&inodedep->id_pendinghd) || 7544 !LIST_EMPTY(&inodedep->id_bufwait) || 7545 !LIST_EMPTY(&inodedep->id_inowait) || 7546 !TAILQ_EMPTY(&inodedep->id_inoreflst) || 7547 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 7548 !TAILQ_EMPTY(&inodedep->id_newinoupdt) || 7549 !TAILQ_EMPTY(&inodedep->id_extupdt) || 7550 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 7551 !TAILQ_EMPTY(&inodedep->id_freeblklst) || 7552 inodedep->id_mkdiradd != NULL || 7553 inodedep->id_nlinkdelta != 0 || 7554 inodedep->id_savedino1 != NULL) 7555 return (0); 7556 if (inodedep->id_state & ONDEPLIST) 7557 LIST_REMOVE(inodedep, id_deps); 7558 LIST_REMOVE(inodedep, id_hash); 7559 WORKITEM_FREE(inodedep, D_INODEDEP); 7560 return (1); 7561} 7562 7563/* 7564 * Free the block referenced by a freework structure. The parent freeblks 7565 * structure is released and completed when the final cg bitmap reaches 7566 * the disk. This routine may be freeing a jnewblk which never made it to 7567 * disk in which case we do not have to wait as the operation is undone 7568 * in memory immediately. 
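 *
 * The wait-or-not choice reduces to a small predicate; a hedged sketch
 * of the cases handled below (names invented for illustration):
 *
 *	static int
 *	must_wait_for_cg_write(int journaling, int alloc_never_written)
 *	{
 *		if (alloc_never_written)
 *			return (0);
 *		return (journaling);
 *	}
 *
 * When the jnewblk is still unwritten the allocation is undone in
 * memory and nothing waits; otherwise only a journaled (SUJ) mount
 * takes the DELAYEDFREE path that bumps fb_cgwait and parks the
 * freework on the cg buffer's work list.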
7569 */ 7570static void 7571freework_freeblock(freework) 7572 struct freework *freework; 7573{ 7574 struct freeblks *freeblks; 7575 struct jnewblk *jnewblk; 7576 struct ufsmount *ump; 7577 struct workhead wkhd; 7578 struct fs *fs; 7579 int bsize; 7580 int needj; 7581 7582 ump = VFSTOUFS(freework->fw_list.wk_mp); 7583 LOCK_OWNED(ump); 7584 /* 7585 * Handle partial truncate separately. 7586 */ 7587 if (freework->fw_indir) { 7588 complete_trunc_indir(freework); 7589 return; 7590 } 7591 freeblks = freework->fw_freeblks; 7592 fs = ump->um_fs; 7593 needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0; 7594 bsize = lfragtosize(fs, freework->fw_frags); 7595 LIST_INIT(&wkhd); 7596 /* 7597 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives 7598 * on the indirblk hashtable and prevents premature freeing. 7599 */ 7600 freework->fw_state |= DEPCOMPLETE; 7601 /* 7602 * SUJ needs to wait for the segment referencing freed indirect 7603 * blocks to expire so that we know the checker will not confuse 7604 * a re-allocated indirect block with its old contents. 7605 */ 7606 if (needj && freework->fw_lbn <= -NDADDR) 7607 indirblk_insert(freework); 7608 /* 7609 * If we are canceling an existing jnewblk pass it to the free 7610 * routine, otherwise pass the freeblk which will ultimately 7611 * release the freeblks. If we're not journaling, we can just 7612 * free the freeblks immediately. 7613 */ 7614 jnewblk = freework->fw_jnewblk; 7615 if (jnewblk != NULL) { 7616 cancel_jnewblk(jnewblk, &wkhd); 7617 needj = 0; 7618 } else if (needj) { 7619 freework->fw_state |= DELAYEDFREE; 7620 freeblks->fb_cgwait++; 7621 WORKLIST_INSERT(&wkhd, &freework->fw_list); 7622 } 7623 FREE_LOCK(ump); 7624 freeblks_free(ump, freeblks, btodb(bsize)); 7625 CTR4(KTR_SUJ, 7626 "freework_freeblock: ino %d blkno %jd lbn %jd size %ld", 7627 freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize); 7628 ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize, 7629 freeblks->fb_inum, freeblks->fb_vtype, &wkhd); 7630 ACQUIRE_LOCK(ump); 7631 /* 7632 * The jnewblk will be discarded and the bits in the map never 7633 * made it to disk. We can immediately free the freeblk. 7634 */ 7635 if (needj == 0) 7636 handle_written_freework(freework); 7637} 7638 7639/* 7640 * We enqueue freework items that need processing back on the freeblks and 7641 * add the freeblks to the worklist. This makes it easier to find all work 7642 * required to flush a truncation in process_truncates(). 7643 */ 7644static void 7645freework_enqueue(freework) 7646 struct freework *freework; 7647{ 7648 struct freeblks *freeblks; 7649 7650 freeblks = freework->fw_freeblks; 7651 if ((freework->fw_state & INPROGRESS) == 0) 7652 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list); 7653 if ((freeblks->fb_state & 7654 (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE && 7655 LIST_EMPTY(&freeblks->fb_jblkdephd)) 7656 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7657} 7658 7659/* 7660 * Start, continue, or finish the process of freeing an indirect block tree. 7661 * The free operation may be paused at any point with fw_off containing the 7662 * offset to restart from. This enables us to implement some flow control 7663 * for large truncates which may fan out and generate a huge number of 7664 * dependencies. 
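 *
 * A minimal userland sketch of this resume-from-offset pattern
 * (illustrative only; the names are invented and process_slot() is a
 * hypothetical worker, not a kernel routine):
 *
 *	static void process_slot(int slot);
 *
 *	struct cursor {
 *		int	off;	// next pointer slot, fw_off in the kernel
 *		int	nptrs;	// NINDIR(fs) in the kernel
 *	};
 *
 *	static int
 *	run_some(struct cursor *c, int budget)
 *	{
 *		while (c->off < c->nptrs && budget-- > 0)
 *			process_slot(c->off++);
 *		return (c->off == c->nptrs);
 *	}
 *
 * The work item is simply requeued until run_some() reports completion,
 * which is how fw_off lets one large truncate proceed in bounded steps.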
7665 */ 7666static void 7667handle_workitem_indirblk(freework) 7668 struct freework *freework; 7669{ 7670 struct freeblks *freeblks; 7671 struct ufsmount *ump; 7672 struct fs *fs; 7673 7674 freeblks = freework->fw_freeblks; 7675 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7676 fs = ump->um_fs; 7677 if (freework->fw_state & DEPCOMPLETE) { 7678 handle_written_freework(freework); 7679 return; 7680 } 7681 if (freework->fw_off == NINDIR(fs)) { 7682 freework_freeblock(freework); 7683 return; 7684 } 7685 freework->fw_state |= INPROGRESS; 7686 FREE_LOCK(ump); 7687 indir_trunc(freework, fsbtodb(fs, freework->fw_blkno), 7688 freework->fw_lbn); 7689 ACQUIRE_LOCK(ump); 7690} 7691 7692/* 7693 * Called when a freework structure attached to a cg buf is written. The 7694 * ref on either the parent or the freeblks structure is released and 7695 * the freeblks is added back to the worklist if there is more work to do. 7696 */ 7697static void 7698handle_written_freework(freework) 7699 struct freework *freework; 7700{ 7701 struct freeblks *freeblks; 7702 struct freework *parent; 7703 7704 freeblks = freework->fw_freeblks; 7705 parent = freework->fw_parent; 7706 if (freework->fw_state & DELAYEDFREE) 7707 freeblks->fb_cgwait--; 7708 freework->fw_state |= COMPLETE; 7709 if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE) 7710 WORKITEM_FREE(freework, D_FREEWORK); 7711 if (parent) { 7712 if (--parent->fw_ref == 0) 7713 freework_enqueue(parent); 7714 return; 7715 } 7716 if (--freeblks->fb_ref != 0) 7717 return; 7718 if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) == 7719 ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd)) 7720 add_to_worklist(&freeblks->fb_list, WK_NODELAY); 7721} 7722 7723/* 7724 * This workitem routine performs the block de-allocation. 7725 * The workitem is added to the pending list after the updated 7726 * inode block has been written to disk. As mentioned above, 7727 * checks regarding the number of blocks de-allocated (compared 7728 * to the number of blocks allocated for the file) are also 7729 * performed in this function. 
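 *
 * The drain loop below follows the usual soft updates shape: pop the
 * head, dispatch on type, repeat. A hedged standalone sketch of that
 * pattern (types and counters invented for illustration):
 *
 *	#include <sys/queue.h>
 *	#include <stdlib.h>
 *
 *	struct witem {
 *		LIST_ENTRY(witem)	link;
 *		int			type;
 *	};
 *	LIST_HEAD(whead, witem);
 *
 *	static int ndirrem, nblock;
 *
 *	static void
 *	drain(struct whead *head)
 *	{
 *		struct witem *w;
 *
 *		while ((w = LIST_FIRST(head)) != NULL) {
 *			LIST_REMOVE(w, link);
 *			switch (w->type) {
 *			case 1:
 *				ndirrem++;
 *				break;
 *			case 2:
 *				nblock++;
 *				break;
 *			default:
 *				abort();
 *			}
 *			free(w);
 *		}
 *	}
 *
 * The real loop naturally does more than count: each case hands the
 * item to its own completion routine, and unknown types panic.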
7730 */ 7731static int 7732handle_workitem_freeblocks(freeblks, flags) 7733 struct freeblks *freeblks; 7734 int flags; 7735{ 7736 struct freework *freework; 7737 struct newblk *newblk; 7738 struct allocindir *aip; 7739 struct ufsmount *ump; 7740 struct worklist *wk; 7741 7742 KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd), 7743 ("handle_workitem_freeblocks: Journal entries not written.")); 7744 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7745 ACQUIRE_LOCK(ump); 7746 while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) { 7747 WORKLIST_REMOVE(wk); 7748 switch (wk->wk_type) { 7749 case D_DIRREM: 7750 wk->wk_state |= COMPLETE; 7751 add_to_worklist(wk, 0); 7752 continue; 7753 7754 case D_ALLOCDIRECT: 7755 free_newblk(WK_NEWBLK(wk)); 7756 continue; 7757 7758 case D_ALLOCINDIR: 7759 aip = WK_ALLOCINDIR(wk); 7760 freework = NULL; 7761 if (aip->ai_state & DELAYEDFREE) { 7762 FREE_LOCK(ump); 7763 freework = newfreework(ump, freeblks, NULL, 7764 aip->ai_lbn, aip->ai_newblkno, 7765 ump->um_fs->fs_frag, 0, 0); 7766 ACQUIRE_LOCK(ump); 7767 } 7768 newblk = WK_NEWBLK(wk); 7769 if (newblk->nb_jnewblk) { 7770 freework->fw_jnewblk = newblk->nb_jnewblk; 7771 newblk->nb_jnewblk->jn_dep = &freework->fw_list; 7772 newblk->nb_jnewblk = NULL; 7773 } 7774 free_newblk(newblk); 7775 continue; 7776 7777 case D_FREEWORK: 7778 freework = WK_FREEWORK(wk); 7779 if (freework->fw_lbn <= -NDADDR) 7780 handle_workitem_indirblk(freework); 7781 else 7782 freework_freeblock(freework); 7783 continue; 7784 default: 7785 panic("handle_workitem_freeblocks: Unknown type %s", 7786 TYPENAME(wk->wk_type)); 7787 } 7788 } 7789 if (freeblks->fb_ref != 0) { 7790 freeblks->fb_state &= ~INPROGRESS; 7791 wake_worklist(&freeblks->fb_list); 7792 freeblks = NULL; 7793 } 7794 FREE_LOCK(ump); 7795 if (freeblks) 7796 return handle_complete_freeblocks(freeblks, flags); 7797 return (0); 7798} 7799 7800/* 7801 * Handle completion of block free via truncate. This allows fs_pending 7802 * to track the actual free block count more closely than if we only updated 7803 * it at the end. We must be careful to handle cases where the block count 7804 * on free was incorrect. 7805 */ 7806static void 7807freeblks_free(ump, freeblks, blocks) 7808 struct ufsmount *ump; 7809 struct freeblks *freeblks; 7810 int blocks; 7811{ 7812 struct fs *fs; 7813 ufs2_daddr_t remain; 7814 7815 UFS_LOCK(ump); 7816 remain = -freeblks->fb_chkcnt; 7817 freeblks->fb_chkcnt += blocks; 7818 if (remain > 0) { 7819 if (remain < blocks) 7820 blocks = remain; 7821 fs = ump->um_fs; 7822 fs->fs_pendingblocks -= blocks; 7823 } 7824 UFS_UNLOCK(ump); 7825} 7826 7827/* 7828 * Once all of the freework workitems are complete we can retire the 7829 * freeblocks dependency and any journal work awaiting completion. This 7830 * can not be called until all other dependencies are stable on disk. 7831 */ 7832static int 7833handle_complete_freeblocks(freeblks, flags) 7834 struct freeblks *freeblks; 7835 int flags; 7836{ 7837 struct inodedep *inodedep; 7838 struct inode *ip; 7839 struct vnode *vp; 7840 struct fs *fs; 7841 struct ufsmount *ump; 7842 ufs2_daddr_t spare; 7843 7844 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7845 fs = ump->um_fs; 7846 flags = LK_EXCLUSIVE | flags; 7847 spare = freeblks->fb_chkcnt; 7848 7849 /* 7850 * If we did not release the expected number of blocks we may have 7851 * to adjust the inode block count here. Only do so if it wasn't 7852 * a truncation to zero and the modrev still matches. 
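 *
 * A hedged arithmetic sketch of the bookkeeping, as reconstructed from
 * freeblks_free() above: fb_chkcnt starts at minus the predicted count,
 * every batch actually freed is added back, and the residue here is the
 * signed prediction error:
 *
 *	int64_t chkcnt = -expected;	// at freeblks setup
 *	chkcnt += freed_batch1;		// freeblks_free()
 *	chkcnt += freed_batch2;		// freeblks_free()
 *	int64_t spare = chkcnt;		// freed_total - expected
 *
 * spare > 0 means more blocks were released than predicted, so i_blocks
 * is reduced by the surplus; spare < 0 returns the shortfall to
 * fs_pendingblocks. (expected and freed_batchN are invented names for
 * this illustration.)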
7853 */ 7854 if (spare && freeblks->fb_len != 0) { 7855 if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7856 flags, &vp, FFSV_FORCEINSMQ) != 0) 7857 return (EBUSY); 7858 ip = VTOI(vp); 7859 if (DIP(ip, i_modrev) == freeblks->fb_modrev) { 7860 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare); 7861 ip->i_flag |= IN_CHANGE; 7862 /* 7863 * We must wait so this happens before the 7864 * journal is reclaimed. 7865 */ 7866 ffs_update(vp, 1); 7867 } 7868 vput(vp); 7869 } 7870 if (spare < 0) { 7871 UFS_LOCK(ump); 7872 fs->fs_pendingblocks += spare; 7873 UFS_UNLOCK(ump); 7874 } 7875#ifdef QUOTA 7876 /* Handle spare. */ 7877 if (spare) 7878 quotaadj(freeblks->fb_quota, ump, -spare); 7879 quotarele(freeblks->fb_quota); 7880#endif 7881 ACQUIRE_LOCK(ump); 7882 if (freeblks->fb_state & ONDEPLIST) { 7883 inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum, 7884 0, &inodedep); 7885 TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next); 7886 freeblks->fb_state &= ~ONDEPLIST; 7887 if (TAILQ_EMPTY(&inodedep->id_freeblklst)) 7888 free_inodedep(inodedep); 7889 } 7890 /* 7891 * All of the freeblock deps must be complete prior to this call 7892 * so it's now safe to complete earlier outstanding journal entries. 7893 */ 7894 handle_jwork(&freeblks->fb_jwork); 7895 WORKITEM_FREE(freeblks, D_FREEBLKS); 7896 FREE_LOCK(ump); 7897 return (0); 7898} 7899 7900/* 7901 * Release blocks associated with the freeblks and stored in the indirect 7902 * block dbn. If level is greater than SINGLE, the block is an indirect block 7903 * and recursive calls to indirtrunc must be used to cleanse other indirect 7904 * blocks. 7905 * 7906 * This handles partial and complete truncation of blocks. Partial is noted 7907 * with goingaway == 0. In this case the freework is completed after the 7908 * zero'd indirects are written to disk. For full truncation the freework 7909 * is completed after the block is freed. 7910 */ 7911static void 7912indir_trunc(freework, dbn, lbn) 7913 struct freework *freework; 7914 ufs2_daddr_t dbn; 7915 ufs_lbn_t lbn; 7916{ 7917 struct freework *nfreework; 7918 struct workhead wkhd; 7919 struct freeblks *freeblks; 7920 struct buf *bp; 7921 struct fs *fs; 7922 struct indirdep *indirdep; 7923 struct ufsmount *ump; 7924 ufs1_daddr_t *bap1 = 0; 7925 ufs2_daddr_t nb, nnb, *bap2 = 0; 7926 ufs_lbn_t lbnadd, nlbn; 7927 int i, nblocks, ufs1fmt; 7928 int freedblocks; 7929 int goingaway; 7930 int freedeps; 7931 int needj; 7932 int level; 7933 int cnt; 7934 7935 freeblks = freework->fw_freeblks; 7936 ump = VFSTOUFS(freeblks->fb_list.wk_mp); 7937 fs = ump->um_fs; 7938 /* 7939 * Get buffer of block pointers to be freed. There are three cases: 7940 * 7941 * 1) Partial truncate caches the indirdep pointer in the freework 7942 * which provides us a back copy to the save bp which holds the 7943 * pointers we want to clear. When this completes the zero 7944 * pointers are written to the real copy. 7945 * 2) The indirect is being completely truncated, cancel_indirdep() 7946 * eliminated the real copy and placed the indirdep on the saved 7947 * copy. The indirdep and buf are discarded when this completes. 7948 * 3) The indirect was not in memory, we read a copy off of the disk 7949 * using the devvp and drop and invalidate the buffer when we're 7950 * done. 
7951 */ 7952 goingaway = 1; 7953 indirdep = NULL; 7954 if (freework->fw_indir != NULL) { 7955 goingaway = 0; 7956 indirdep = freework->fw_indir; 7957 bp = indirdep->ir_savebp; 7958 if (bp == NULL || bp->b_blkno != dbn) 7959 panic("indir_trunc: Bad saved buf %p blkno %jd", 7960 bp, (intmax_t)dbn); 7961 } else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) { 7962 /* 7963 * The lock prevents the buf dep list from changing and 7964 * indirects on devvp should only ever have one dependency. 7965 */ 7966 indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep)); 7967 if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0) 7968 panic("indir_trunc: Bad indirdep %p from buf %p", 7969 indirdep, bp); 7970 } else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 7971 NOCRED, &bp) != 0) { 7972 brelse(bp); 7973 return; 7974 } 7975 ACQUIRE_LOCK(ump); 7976 /* Protects against a race with complete_trunc_indir(). */ 7977 freework->fw_state &= ~INPROGRESS; 7978 /* 7979 * If we have an indirdep we need to enforce the truncation order 7980 * and discard it when it is complete. 7981 */ 7982 if (indirdep) { 7983 if (freework != TAILQ_FIRST(&indirdep->ir_trunc) && 7984 !TAILQ_EMPTY(&indirdep->ir_trunc)) { 7985 /* 7986 * Add the complete truncate to the list on the 7987 * indirdep to enforce in-order processing. 7988 */ 7989 if (freework->fw_indir == NULL) 7990 TAILQ_INSERT_TAIL(&indirdep->ir_trunc, 7991 freework, fw_next); 7992 FREE_LOCK(ump); 7993 return; 7994 } 7995 /* 7996 * If we're goingaway, free the indirdep. Otherwise it will 7997 * linger until the write completes. 7998 */ 7999 if (goingaway) { 8000 free_indirdep(indirdep); 8001 ump->softdep_numindirdeps -= 1; 8002 } 8003 } 8004 FREE_LOCK(ump); 8005 /* Initialize pointers depending on block size. */ 8006 if (ump->um_fstype == UFS1) { 8007 bap1 = (ufs1_daddr_t *)bp->b_data; 8008 nb = bap1[freework->fw_off]; 8009 ufs1fmt = 1; 8010 } else { 8011 bap2 = (ufs2_daddr_t *)bp->b_data; 8012 nb = bap2[freework->fw_off]; 8013 ufs1fmt = 0; 8014 } 8015 level = lbn_level(lbn); 8016 needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0; 8017 lbnadd = lbn_offset(fs, level); 8018 nblocks = btodb(fs->fs_bsize); 8019 nfreework = freework; 8020 freedeps = 0; 8021 cnt = 0; 8022 /* 8023 * Reclaim blocks. Traverses into nested indirect levels and 8024 * arranges for the current level to be freed when subordinates 8025 * are free when journaling. 8026 */ 8027 for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) { 8028 if (i != NINDIR(fs) - 1) { 8029 if (ufs1fmt) 8030 nnb = bap1[i+1]; 8031 else 8032 nnb = bap2[i+1]; 8033 } else 8034 nnb = 0; 8035 if (nb == 0) 8036 continue; 8037 cnt++; 8038 if (level != 0) { 8039 nlbn = (lbn + 1) - (i * lbnadd); 8040 if (needj != 0) { 8041 nfreework = newfreework(ump, freeblks, freework, 8042 nlbn, nb, fs->fs_frag, 0, 0); 8043 freedeps++; 8044 } 8045 indir_trunc(nfreework, fsbtodb(fs, nb), nlbn); 8046 } else { 8047 struct freedep *freedep; 8048 8049 /* 8050 * Attempt to aggregate freedep dependencies for 8051 * all blocks being released to the same CG. 
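 *
 * The boundary test below allocates one freedep per run of blocks in
 * the same cylinder group. A small standalone sketch of the run
 * counting it performs (invented names; key[] stands in for the
 * dtog() values of successive block pointers):
 *
 *	static int
 *	count_runs(const int *key, int n)
 *	{
 *		int i, nruns = 0;
 *
 *		for (i = 0; i < n; i++)
 *			if (i == n - 1 || key[i] != key[i + 1])
 *				nruns++;
 *		return (nruns);
 *	}
 *
 * For a fully populated indirect block this matches the number of
 * freedep structures the loop allocates, keeping the per-cg write
 * dependencies proportional to the number of groups touched rather
 * than the number of blocks freed.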
8052 */ 8053 LIST_INIT(&wkhd); 8054 if (needj != 0 && 8055 (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) { 8056 freedep = newfreedep(freework); 8057 WORKLIST_INSERT_UNLOCKED(&wkhd, 8058 &freedep->fd_list); 8059 freedeps++; 8060 } 8061 CTR3(KTR_SUJ, 8062 "indir_trunc: ino %d blkno %jd size %ld", 8063 freeblks->fb_inum, nb, fs->fs_bsize); 8064 ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, 8065 fs->fs_bsize, freeblks->fb_inum, 8066 freeblks->fb_vtype, &wkhd); 8067 } 8068 } 8069 if (goingaway) { 8070 bp->b_flags |= B_INVAL | B_NOCACHE; 8071 brelse(bp); 8072 } 8073 freedblocks = 0; 8074 if (level == 0) 8075 freedblocks = (nblocks * cnt); 8076 if (needj == 0) 8077 freedblocks += nblocks; 8078 freeblks_free(ump, freeblks, freedblocks); 8079 /* 8080 * If we are journaling set up the ref counts and offset so this 8081 * indirect can be completed when its children are free. 8082 */ 8083 if (needj) { 8084 ACQUIRE_LOCK(ump); 8085 freework->fw_off = i; 8086 freework->fw_ref += freedeps; 8087 freework->fw_ref -= NINDIR(fs) + 1; 8088 if (level == 0) 8089 freeblks->fb_cgwait += freedeps; 8090 if (freework->fw_ref == 0) 8091 freework_freeblock(freework); 8092 FREE_LOCK(ump); 8093 return; 8094 } 8095 /* 8096 * If we're not journaling we can free the indirect now. 8097 */ 8098 dbn = dbtofsb(fs, dbn); 8099 CTR3(KTR_SUJ, 8100 "indir_trunc 2: ino %d blkno %jd size %ld", 8101 freeblks->fb_inum, dbn, fs->fs_bsize); 8102 ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize, 8103 freeblks->fb_inum, freeblks->fb_vtype, NULL); 8104 /* Non SUJ softdep does single-threaded truncations. */ 8105 if (freework->fw_blkno == dbn) { 8106 freework->fw_state |= ALLCOMPLETE; 8107 ACQUIRE_LOCK(ump); 8108 handle_written_freework(freework); 8109 FREE_LOCK(ump); 8110 } 8111 return; 8112} 8113 8114/* 8115 * Cancel an allocindir when it is removed via truncation. When bp is not 8116 * NULL the indirect never appeared on disk and is scheduled to be freed 8117 * independently of the indir so we can more easily track journal work. 8118 */ 8119static void 8120cancel_allocindir(aip, bp, freeblks, trunc) 8121 struct allocindir *aip; 8122 struct buf *bp; 8123 struct freeblks *freeblks; 8124 int trunc; 8125{ 8126 struct indirdep *indirdep; 8127 struct freefrag *freefrag; 8128 struct newblk *newblk; 8129 8130 newblk = (struct newblk *)aip; 8131 LIST_REMOVE(aip, ai_next); 8132 /* 8133 * We must eliminate the pointer in bp if it must be freed on its 8134 * own due to partial truncate or pending journal work. 8135 */ 8136 if (bp && (trunc || newblk->nb_jnewblk)) { 8137 /* 8138 * Clear the pointer and mark the aip to be freed 8139 * directly if it never existed on disk. 8140 */ 8141 aip->ai_state |= DELAYEDFREE; 8142 indirdep = aip->ai_indirdep; 8143 if (indirdep->ir_state & UFS1FMT) 8144 ((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8145 else 8146 ((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0; 8147 } 8148 /* 8149 * When truncating the previous pointer will be freed via 8150 * savedbp. Eliminate the freefrag which would dup free. 8151 */ 8152 if (trunc && (freefrag = newblk->nb_freefrag) != NULL) { 8153 newblk->nb_freefrag = NULL; 8154 if (freefrag->ff_jdep) 8155 cancel_jfreefrag( 8156 WK_JFREEFRAG(freefrag->ff_jdep)); 8157 jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork); 8158 WORKITEM_FREE(freefrag, D_FREEFRAG); 8159 } 8160 /* 8161 * If the journal hasn't been written the jnewblk must be passed 8162 * to the call to ffs_blkfree that reclaims the space. 
We accomplish 8163 * this by leaving the journal dependency on the newblk to be freed 8164 * when a freework is created in handle_workitem_freeblocks(). 8165 */ 8166 cancel_newblk(newblk, NULL, &freeblks->fb_jwork); 8167 WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list); 8168} 8169 8170/* 8171 * Create the mkdir dependencies for . and .. in a new directory. Link them 8172 * in to a newdirblk so any subsequent additions are tracked properly. The 8173 * caller is responsible for adding the mkdir1 dependency to the journal 8174 * and updating id_mkdiradd. This function returns with the soft updates 8175 * lock held. 8176 */ 8177static struct mkdir * 8178setup_newdir(dap, newinum, dinum, newdirbp, mkdirp) 8179 struct diradd *dap; 8180 ino_t newinum; 8181 ino_t dinum; 8182 struct buf *newdirbp; 8183 struct mkdir **mkdirp; 8184{ 8185 struct newblk *newblk; 8186 struct pagedep *pagedep; 8187 struct inodedep *inodedep; 8188 struct newdirblk *newdirblk = 0; 8189 struct mkdir *mkdir1, *mkdir2; 8190 struct worklist *wk; 8191 struct jaddref *jaddref; 8192 struct ufsmount *ump; 8193 struct mount *mp; 8194 8195 mp = dap->da_list.wk_mp; 8196 ump = VFSTOUFS(mp); 8197 newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK, 8198 M_SOFTDEP_FLAGS); 8199 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8200 LIST_INIT(&newdirblk->db_mkdir); 8201 mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8202 workitem_alloc(&mkdir1->md_list, D_MKDIR, mp); 8203 mkdir1->md_state = ATTACHED | MKDIR_BODY; 8204 mkdir1->md_diradd = dap; 8205 mkdir1->md_jaddref = NULL; 8206 mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS); 8207 workitem_alloc(&mkdir2->md_list, D_MKDIR, mp); 8208 mkdir2->md_state = ATTACHED | MKDIR_PARENT; 8209 mkdir2->md_diradd = dap; 8210 mkdir2->md_jaddref = NULL; 8211 if (MOUNTEDSUJ(mp) == 0) { 8212 mkdir1->md_state |= DEPCOMPLETE; 8213 mkdir2->md_state |= DEPCOMPLETE; 8214 } 8215 /* 8216 * Dependency on "." and ".." being written to disk. 8217 */ 8218 mkdir1->md_buf = newdirbp; 8219 ACQUIRE_LOCK(VFSTOUFS(mp)); 8220 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs); 8221 /* 8222 * We must link the pagedep, allocdirect, and newdirblk for 8223 * the initial file page so the pointer to the new directory 8224 * is not written until the directory contents are live and 8225 * any subsequent additions are not marked live until the 8226 * block is reachable via the inode. 8227 */ 8228 if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0) 8229 panic("setup_newdir: lost pagedep"); 8230 LIST_FOREACH(wk, &newdirbp->b_dep, wk_list) 8231 if (wk->wk_type == D_ALLOCDIRECT) 8232 break; 8233 if (wk == NULL) 8234 panic("setup_newdir: lost allocdirect"); 8235 if (pagedep->pd_state & NEWBLOCK) 8236 panic("setup_newdir: NEWBLOCK already set"); 8237 newblk = WK_NEWBLK(wk); 8238 pagedep->pd_state |= NEWBLOCK; 8239 pagedep->pd_newdirblk = newdirblk; 8240 newdirblk->db_pagedep = pagedep; 8241 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8242 WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list); 8243 /* 8244 * Look up the inodedep for the parent directory so that we 8245 * can link mkdir2 into the pending dotdot jaddref or 8246 * the inode write if there is none. If the inode is 8247 * ALLCOMPLETE and no jaddref is present all dependencies have 8248 * been satisfied and mkdir2 can be freed. 
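 *
 * Sketched as a hedged three-way decision (the helper names are
 * invented summaries of the branches below, not real routines):
 *
 *	if (journaling)
 *		attach_to_dotdot_jaddref(mkdir2);
 *	else if (parent_inodedep_allcomplete)
 *		discard(mkdir2);
 *	else
 *		wait_on_parent_inode_write(mkdir2);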
8249 */ 8250 inodedep_lookup(mp, dinum, 0, &inodedep); 8251 if (MOUNTEDSUJ(mp)) { 8252 if (inodedep == NULL) 8253 panic("setup_newdir: Lost parent."); 8254 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8255 inoreflst); 8256 KASSERT(jaddref != NULL && jaddref->ja_parent == newinum && 8257 (jaddref->ja_state & MKDIR_PARENT), 8258 ("setup_newdir: bad dotdot jaddref %p", jaddref)); 8259 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8260 mkdir2->md_jaddref = jaddref; 8261 jaddref->ja_mkdir = mkdir2; 8262 } else if (inodedep == NULL || 8263 (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) { 8264 dap->da_state &= ~MKDIR_PARENT; 8265 WORKITEM_FREE(mkdir2, D_MKDIR); 8266 mkdir2 = NULL; 8267 } else { 8268 LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs); 8269 WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list); 8270 } 8271 *mkdirp = mkdir2; 8272 8273 return (mkdir1); 8274} 8275 8276/* 8277 * Directory entry addition dependencies. 8278 * 8279 * When adding a new directory entry, the inode (with its incremented link 8280 * count) must be written to disk before the directory entry's pointer to it. 8281 * Also, if the inode is newly allocated, the corresponding freemap must be 8282 * updated (on disk) before the directory entry's pointer. These requirements 8283 * are met via undo/redo on the directory entry's pointer, which consists 8284 * simply of the inode number. 8285 * 8286 * As directory entries are added and deleted, the free space within a 8287 * directory block can become fragmented. The ufs filesystem will compact 8288 * a fragmented directory block to make space for a new entry. When this 8289 * occurs, the offsets of previously added entries change. Any "diradd" 8290 * dependency structures corresponding to these entries must be updated with 8291 * the new offsets. 8292 */ 8293 8294/* 8295 * This routine is called after the in-memory inode's link 8296 * count has been incremented, but before the directory entry's 8297 * pointer to the inode has been set. 8298 */ 8299int 8300softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk) 8301 struct buf *bp; /* buffer containing directory block */ 8302 struct inode *dp; /* inode for directory */ 8303 off_t diroffset; /* offset of new entry in directory */ 8304 ino_t newinum; /* inode referenced by new directory entry */ 8305 struct buf *newdirbp; /* non-NULL => contents of new mkdir */ 8306 int isnewblk; /* entry is in a newly allocated block */ 8307{ 8308 int offset; /* offset of new entry within directory block */ 8309 ufs_lbn_t lbn; /* block in directory containing new entry */ 8310 struct fs *fs; 8311 struct diradd *dap; 8312 struct newblk *newblk; 8313 struct pagedep *pagedep; 8314 struct inodedep *inodedep; 8315 struct newdirblk *newdirblk = 0; 8316 struct mkdir *mkdir1, *mkdir2; 8317 struct jaddref *jaddref; 8318 struct ufsmount *ump; 8319 struct mount *mp; 8320 int isindir; 8321 8322 ump = dp->i_ump; 8323 mp = UFSTOVFS(ump); 8324 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8325 ("softdep_setup_directory_add called on non-softdep filesystem")); 8326 /* 8327 * Whiteouts have no dependencies. 
8328 */ 8329 if (newinum == WINO) { 8330 if (newdirbp != NULL) 8331 bdwrite(newdirbp); 8332 return (0); 8333 } 8334 jaddref = NULL; 8335 mkdir1 = mkdir2 = NULL; 8336 fs = dp->i_fs; 8337 lbn = lblkno(fs, diroffset); 8338 offset = blkoff(fs, diroffset); 8339 dap = malloc(sizeof(struct diradd), M_DIRADD, 8340 M_SOFTDEP_FLAGS|M_ZERO); 8341 workitem_alloc(&dap->da_list, D_DIRADD, mp); 8342 dap->da_offset = offset; 8343 dap->da_newinum = newinum; 8344 dap->da_state = ATTACHED; 8345 LIST_INIT(&dap->da_jwork); 8346 isindir = bp->b_lblkno >= NDADDR; 8347 if (isnewblk && 8348 (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) { 8349 newdirblk = malloc(sizeof(struct newdirblk), 8350 M_NEWDIRBLK, M_SOFTDEP_FLAGS); 8351 workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp); 8352 LIST_INIT(&newdirblk->db_mkdir); 8353 } 8354 /* 8355 * If we're creating a new directory setup the dependencies and set 8356 * the dap state to wait for them. Otherwise it's COMPLETE and 8357 * we can move on. 8358 */ 8359 if (newdirbp == NULL) { 8360 dap->da_state |= DEPCOMPLETE; 8361 ACQUIRE_LOCK(ump); 8362 } else { 8363 dap->da_state |= MKDIR_BODY | MKDIR_PARENT; 8364 mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp, 8365 &mkdir2); 8366 } 8367 /* 8368 * Link into parent directory pagedep to await its being written. 8369 */ 8370 pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep); 8371#ifdef DEBUG 8372 if (diradd_lookup(pagedep, offset) != NULL) 8373 panic("softdep_setup_directory_add: %p already at off %d\n", 8374 diradd_lookup(pagedep, offset), offset); 8375#endif 8376 dap->da_pagedep = pagedep; 8377 LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap, 8378 da_pdlist); 8379 inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep); 8380 /* 8381 * If we're journaling, link the diradd into the jaddref so it 8382 * may be completed after the journal entry is written. Otherwise, 8383 * link the diradd into its inodedep. If the inode is not yet 8384 * written place it on the bufwait list, otherwise do the post-inode 8385 * write processing to put it on the id_pendinghd list. 8386 */ 8387 if (MOUNTEDSUJ(mp)) { 8388 jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst, 8389 inoreflst); 8390 KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number, 8391 ("softdep_setup_directory_add: bad jaddref %p", jaddref)); 8392 jaddref->ja_diroff = diroffset; 8393 jaddref->ja_diradd = dap; 8394 add_to_journal(&jaddref->ja_list); 8395 } else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) 8396 diradd_inode_written(dap, inodedep); 8397 else 8398 WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list); 8399 /* 8400 * Add the journal entries for . and .. links now that the primary 8401 * link is written. 8402 */ 8403 if (mkdir1 != NULL && MOUNTEDSUJ(mp)) { 8404 jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref, 8405 inoreflst, if_deps); 8406 KASSERT(jaddref != NULL && 8407 jaddref->ja_ino == jaddref->ja_parent && 8408 (jaddref->ja_state & MKDIR_BODY), 8409 ("softdep_setup_directory_add: bad dot jaddref %p", 8410 jaddref)); 8411 mkdir1->md_jaddref = jaddref; 8412 jaddref->ja_mkdir = mkdir1; 8413 /* 8414 * It is important that the dotdot journal entry 8415 * is added prior to the dot entry since dot writes 8416 * both the dot and dotdot links. These both must 8417 * be added after the primary link for the journal 8418 * to remain consistent. 
8419 */ 8420 add_to_journal(&mkdir2->md_jaddref->ja_list); 8421 add_to_journal(&jaddref->ja_list); 8422 } 8423 /* 8424 * If we are adding a new directory remember this diradd so that if 8425 * we rename it we can keep the dot and dotdot dependencies. If 8426 * we are adding a new name for an inode that has a mkdiradd we 8427 * must be in rename and we have to move the dot and dotdot 8428 * dependencies to this new name. The old name is being orphaned 8429 * soon. 8430 */ 8431 if (mkdir1 != NULL) { 8432 if (inodedep->id_mkdiradd != NULL) 8433 panic("softdep_setup_directory_add: Existing mkdir"); 8434 inodedep->id_mkdiradd = dap; 8435 } else if (inodedep->id_mkdiradd) 8436 merge_diradd(inodedep, dap); 8437 if (newdirblk) { 8438 /* 8439 * There is nothing to do if we are already tracking 8440 * this block. 8441 */ 8442 if ((pagedep->pd_state & NEWBLOCK) != 0) { 8443 WORKITEM_FREE(newdirblk, D_NEWDIRBLK); 8444 FREE_LOCK(ump); 8445 return (0); 8446 } 8447 if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk) 8448 == 0) 8449 panic("softdep_setup_directory_add: lost entry"); 8450 WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list); 8451 pagedep->pd_state |= NEWBLOCK; 8452 pagedep->pd_newdirblk = newdirblk; 8453 newdirblk->db_pagedep = pagedep; 8454 FREE_LOCK(ump); 8455 /* 8456 * If we extended into an indirect signal direnter to sync. 8457 */ 8458 if (isindir) 8459 return (1); 8460 return (0); 8461 } 8462 FREE_LOCK(ump); 8463 return (0); 8464} 8465 8466/* 8467 * This procedure is called to change the offset of a directory 8468 * entry when compacting a directory block which must be owned 8469 * exclusively by the caller. Note that the actual entry movement 8470 * must be done in this procedure to ensure that no I/O completions 8471 * occur while the move is in progress. 8472 */ 8473void 8474softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize) 8475 struct buf *bp; /* Buffer holding directory block. */ 8476 struct inode *dp; /* inode for directory */ 8477 caddr_t base; /* address of dp->i_offset */ 8478 caddr_t oldloc; /* address of old directory location */ 8479 caddr_t newloc; /* address of new directory location */ 8480 int entrysize; /* size of directory entry */ 8481{ 8482 int offset, oldoffset, newoffset; 8483 struct pagedep *pagedep; 8484 struct jmvref *jmvref; 8485 struct diradd *dap; 8486 struct direct *de; 8487 struct mount *mp; 8488 ufs_lbn_t lbn; 8489 int flags; 8490 8491 mp = UFSTOVFS(dp->i_ump); 8492 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 8493 ("softdep_change_directoryentry_offset called on " 8494 "non-softdep filesystem")); 8495 de = (struct direct *)oldloc; 8496 jmvref = NULL; 8497 flags = 0; 8498 /* 8499 * Moves are always journaled as it would be too complex to 8500 * determine if any affected adds or removes are present in the 8501 * journal. 
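 *
 * The offset arithmetic below is plain pointer math. A worked example
 * with invented numbers: if the entry's offset within the directory
 * block is 1024 and compaction slides the entry from base + 40 down to
 * base + 16, then
 *
 *	oldoffset = offset + (oldloc - base) = 1024 + 40 = 1064
 *	newoffset = offset + (newloc - base) = 1024 + 16 = 1040
 *
 * and an unfinished diradd at 1064 is simply re-filed under
 * DIRADDHASH(1040) with its da_offset updated.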
 */
	if (MOUNTEDSUJ(mp)) {
		flags = DEPALLOC;
		jmvref = newjmvref(dp, de->d_ino,
		    dp->i_offset + (oldloc - base),
		    dp->i_offset + (newloc - base));
	}
	lbn = lblkno(dp->i_fs, dp->i_offset);
	offset = blkoff(dp->i_fs, dp->i_offset);
	oldoffset = offset + (oldloc - base);
	newoffset = offset + (newloc - base);
	ACQUIRE_LOCK(dp->i_ump);
	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
		goto done;
	dap = diradd_lookup(pagedep, oldoffset);
	if (dap) {
		dap->da_offset = newoffset;
		newoffset = DIRADDHASH(newoffset);
		oldoffset = DIRADDHASH(oldoffset);
		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
		    newoffset != oldoffset) {
			LIST_REMOVE(dap, da_pdlist);
			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
			    dap, da_pdlist);
		}
	}
done:
	if (jmvref) {
		jmvref->jm_pagedep = pagedep;
		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
		add_to_journal(&jmvref->jm_list);
	}
	bcopy(oldloc, newloc, entrysize);
	FREE_LOCK(dp->i_ump);
}

/*
 * Move the mkdir dependencies and journal work from one diradd to another
 * when renaming a directory. The new name must depend on the mkdir deps
 * completing as the old name did. Directories can only have one valid link
 * at a time so one must be canonical.
 */
static void
merge_diradd(inodedep, newdap)
	struct inodedep *inodedep;
	struct diradd *newdap;
{
	struct diradd *olddap;
	struct mkdir *mkdir, *nextmd;
	struct ufsmount *ump;
	short state;

	olddap = inodedep->id_mkdiradd;
	inodedep->id_mkdiradd = newdap;
	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
		newdap->da_state &= ~DEPCOMPLETE;
		ump = VFSTOUFS(inodedep->id_list.wk_mp);
		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
		     mkdir = nextmd) {
			nextmd = LIST_NEXT(mkdir, md_mkdirs);
			if (mkdir->md_diradd != olddap)
				continue;
			mkdir->md_diradd = newdap;
			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
			newdap->da_state |= state;
			olddap->da_state &= ~state;
			if ((olddap->da_state &
			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
				break;
		}
		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
			panic("merge_diradd: unfound ref");
	}
	/*
	 * Any mkdir related journal items are not safe to be freed until
	 * the new name is stable.
	 */
	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
	olddap->da_state |= DEPCOMPLETE;
	complete_diradd(olddap);
}

/*
 * Move the diradd to the pending list when all diradd dependencies are
 * complete.
 */
static void
complete_diradd(dap)
	struct diradd *dap;
{
	struct pagedep *pagedep;

	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
		if (dap->da_state & DIRCHG)
			pagedep = dap->da_previous->dm_pagedep;
		else
			pagedep = dap->da_pagedep;
		LIST_REMOVE(dap, da_pdlist);
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
	}
}

/*
 * Cancel a diradd when a dirrem overlaps with it. We must cancel the journal
 * add entries and conditionally journal the remove.
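 *
 * The governing rule, sketched (a hedged summary of the code below,
 * not a normative statement of the journal format):
 *
 *	if (add_record_already_journaled)
 *		journal_the_remove();
 *	else
 *		drop_both_records();
 *
 * which is why each cancel_jaddref() call below that returns zero frees
 * the matching jremref instead of journaling it: neither half of the
 * add/remove pair ever needs to reach the journal.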
 */
static void
cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
	struct diradd *dap;
	struct dirrem *dirrem;
	struct jremref *jremref;
	struct jremref *dotremref;
	struct jremref *dotdotremref;
{
	struct inodedep *inodedep;
	struct jaddref *jaddref;
	struct inoref *inoref;
	struct ufsmount *ump;
	struct mkdir *mkdir;

	/*
	 * If no remove references were allocated we're on a non-journaled
	 * filesystem and can skip the cancel step.
	 */
	if (jremref == NULL) {
		free_diradd(dap, NULL);
		return;
	}
	/*
	 * Cancel the primary name and free it if it does not require
	 * journaling.
	 */
	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
	    0, &inodedep) != 0) {
		/* Abort the addref that references this diradd. */
		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
			if (inoref->if_list.wk_type != D_JADDREF)
				continue;
			jaddref = (struct jaddref *)inoref;
			if (jaddref->ja_diradd != dap)
				continue;
			if (cancel_jaddref(jaddref, inodedep,
			    &dirrem->dm_jwork) == 0) {
				free_jremref(jremref);
				jremref = NULL;
			}
			break;
		}
	}
	/*
	 * Cancel subordinate names and free them if they do not require
	 * journaling.
	 */
	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
		ump = VFSTOUFS(dap->da_list.wk_mp);
		LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
			if (mkdir->md_diradd != dap)
				continue;
			if ((jaddref = mkdir->md_jaddref) == NULL)
				continue;
			mkdir->md_jaddref = NULL;
			if (mkdir->md_state & MKDIR_PARENT) {
				if (cancel_jaddref(jaddref, NULL,
				    &dirrem->dm_jwork) == 0) {
					free_jremref(dotdotremref);
					dotdotremref = NULL;
				}
			} else {
				if (cancel_jaddref(jaddref, inodedep,
				    &dirrem->dm_jwork) == 0) {
					free_jremref(dotremref);
					dotremref = NULL;
				}
			}
		}
	}

	if (jremref)
		journal_jremref(dirrem, jremref, inodedep);
	if (dotremref)
		journal_jremref(dirrem, dotremref, inodedep);
	if (dotdotremref)
		journal_jremref(dirrem, dotdotremref, NULL);
	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
	free_diradd(dap, &dirrem->dm_jwork);
}

/*
 * Free a diradd dependency structure. This routine must be called
 * with splbio interrupts blocked.
8692 */ 8693static void 8694free_diradd(dap, wkhd) 8695 struct diradd *dap; 8696 struct workhead *wkhd; 8697{ 8698 struct dirrem *dirrem; 8699 struct pagedep *pagedep; 8700 struct inodedep *inodedep; 8701 struct mkdir *mkdir, *nextmd; 8702 struct ufsmount *ump; 8703 8704 ump = VFSTOUFS(dap->da_list.wk_mp); 8705 LOCK_OWNED(ump); 8706 LIST_REMOVE(dap, da_pdlist); 8707 if (dap->da_state & ONWORKLIST) 8708 WORKLIST_REMOVE(&dap->da_list); 8709 if ((dap->da_state & DIRCHG) == 0) { 8710 pagedep = dap->da_pagedep; 8711 } else { 8712 dirrem = dap->da_previous; 8713 pagedep = dirrem->dm_pagedep; 8714 dirrem->dm_dirinum = pagedep->pd_ino; 8715 dirrem->dm_state |= COMPLETE; 8716 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 8717 add_to_worklist(&dirrem->dm_list, 0); 8718 } 8719 if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum, 8720 0, &inodedep) != 0) 8721 if (inodedep->id_mkdiradd == dap) 8722 inodedep->id_mkdiradd = NULL; 8723 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) { 8724 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8725 mkdir = nextmd) { 8726 nextmd = LIST_NEXT(mkdir, md_mkdirs); 8727 if (mkdir->md_diradd != dap) 8728 continue; 8729 dap->da_state &= 8730 ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 8731 LIST_REMOVE(mkdir, md_mkdirs); 8732 if (mkdir->md_state & ONWORKLIST) 8733 WORKLIST_REMOVE(&mkdir->md_list); 8734 if (mkdir->md_jaddref != NULL) 8735 panic("free_diradd: Unexpected jaddref"); 8736 WORKITEM_FREE(mkdir, D_MKDIR); 8737 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) 8738 break; 8739 } 8740 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) 8741 panic("free_diradd: unfound ref"); 8742 } 8743 if (inodedep) 8744 free_inodedep(inodedep); 8745 /* 8746 * Free any journal segments waiting for the directory write. 8747 */ 8748 handle_jwork(&dap->da_jwork); 8749 WORKITEM_FREE(dap, D_DIRADD); 8750} 8751 8752/* 8753 * Directory entry removal dependencies. 8754 * 8755 * When removing a directory entry, the entry's inode pointer must be 8756 * zero'ed on disk before the corresponding inode's link count is decremented 8757 * (possibly freeing the inode for re-use). This dependency is handled by 8758 * updating the directory entry but delaying the inode count reduction until 8759 * after the directory block has been written to disk. After this point, the 8760 * inode count can be decremented whenever it is convenient. 8761 */ 8762 8763/* 8764 * This routine should be called immediately after removing 8765 * a directory entry. The inode's link count should not be 8766 * decremented by the calling procedure -- the soft updates 8767 * code will do this task when it is safe. 8768 */ 8769void 8770softdep_setup_remove(bp, dp, ip, isrmdir) 8771 struct buf *bp; /* buffer containing directory block */ 8772 struct inode *dp; /* inode for the directory being modified */ 8773 struct inode *ip; /* inode for directory entry being removed */ 8774 int isrmdir; /* indicates if doing RMDIR */ 8775{ 8776 struct dirrem *dirrem, *prevdirrem; 8777 struct inodedep *inodedep; 8778 int direct; 8779 8780 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 8781 ("softdep_setup_remove called on non-softdep filesystem")); 8782 /* 8783 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK. We want 8784 * newdirrem() to setup the full directory remove which requires 8785 * isrmdir > 1. 8786 */ 8787 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 8788 /* 8789 * Add the dirrem to the inodedep's pending remove list for quick 8790 * discovery later. 
 */
	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
	    &inodedep) == 0)
		panic("softdep_setup_remove: Lost inodedep.");
	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
	dirrem->dm_state |= ONDEPLIST;
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);

	/*
	 * If the COMPLETE flag is clear, then there were no active
	 * entries and we want to roll back to a zeroed entry until
	 * the new inode is committed to disk. If the COMPLETE flag is
	 * set then we have deleted an entry that never made it to
	 * disk. If the entry we deleted resulted from a name change,
	 * then the old name still resides on disk. We cannot delete
	 * its inode (returned to us in prevdirrem) until the zeroed
	 * directory entry gets to disk. The new inode has never been
	 * referenced on the disk, so can be deleted immediately.
	 */
	if ((dirrem->dm_state & COMPLETE) == 0) {
		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
		    dm_next);
		FREE_LOCK(ip->i_ump);
	} else {
		if (prevdirrem != NULL)
			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
			    prevdirrem, dm_next);
		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
		FREE_LOCK(ip->i_ump);
		if (direct)
			handle_workitem_remove(dirrem, 0);
	}
}

/*
 * Check for an entry matching 'offset' on both the pd_diraddhd list and the
 * pd_pendinghd list of a pagedep.
 */
static struct diradd *
diradd_lookup(pagedep, offset)
	struct pagedep *pagedep;
	int offset;
{
	struct diradd *dap;

	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	return (NULL);
}

/*
 * Search for a .. diradd dependency in a directory that is being removed.
 * If the directory was renamed to a new parent we have a diradd rather
 * than a mkdir for the .. entry. We need to cancel it now before
 * it is found in truncate().
 */
static struct jremref *
cancel_diradd_dotdot(ip, dirrem, jremref)
	struct inode *ip;
	struct dirrem *dirrem;
	struct jremref *jremref;
{
	struct pagedep *pagedep;
	struct diradd *dap;
	struct worklist *wk;

	if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
	    &pagedep) == 0)
		return (jremref);
	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
	if (dap == NULL)
		return (jremref);
	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
	/*
	 * Mark any journal work as belonging to the parent so it is freed
	 * with the .. reference.
	 */
	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
		wk->wk_state |= MKDIR_PARENT;
	return (NULL);
}

/*
 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
 * replace it with a dirrem/diradd pair as a result of re-parenting a
 * directory. This ensures that we don't simultaneously have a mkdir and
 * a diradd for the same .. entry.
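 *
 * Stated as a hedged assertion sketch (the predicates are invented
 * shorthand for the id_mkdiradd and MKDIR_PARENT checks below):
 *
 *	assert(!(tracks_dotdot_via_mkdir(ip) &&
 *	    tracks_dotdot_via_diradd(ip)));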
8883 */ 8884static struct jremref * 8885cancel_mkdir_dotdot(ip, dirrem, jremref) 8886 struct inode *ip; 8887 struct dirrem *dirrem; 8888 struct jremref *jremref; 8889{ 8890 struct inodedep *inodedep; 8891 struct jaddref *jaddref; 8892 struct ufsmount *ump; 8893 struct mkdir *mkdir; 8894 struct diradd *dap; 8895 8896 if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, 8897 &inodedep) == 0) 8898 return (jremref); 8899 dap = inodedep->id_mkdiradd; 8900 if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0) 8901 return (jremref); 8902 ump = VFSTOUFS(inodedep->id_list.wk_mp); 8903 for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir; 8904 mkdir = LIST_NEXT(mkdir, md_mkdirs)) 8905 if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT) 8906 break; 8907 if (mkdir == NULL) 8908 panic("cancel_mkdir_dotdot: Unable to find mkdir\n"); 8909 if ((jaddref = mkdir->md_jaddref) != NULL) { 8910 mkdir->md_jaddref = NULL; 8911 jaddref->ja_state &= ~MKDIR_PARENT; 8912 if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0, 8913 &inodedep) == 0) 8914 panic("cancel_mkdir_dotdot: Lost parent inodedep"); 8915 if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) { 8916 journal_jremref(dirrem, jremref, inodedep); 8917 jremref = NULL; 8918 } 8919 } 8920 if (mkdir->md_state & ONWORKLIST) 8921 WORKLIST_REMOVE(&mkdir->md_list); 8922 mkdir->md_state |= ALLCOMPLETE; 8923 complete_mkdir(mkdir); 8924 return (jremref); 8925} 8926 8927static void 8928journal_jremref(dirrem, jremref, inodedep) 8929 struct dirrem *dirrem; 8930 struct jremref *jremref; 8931 struct inodedep *inodedep; 8932{ 8933 8934 if (inodedep == NULL) 8935 if (inodedep_lookup(jremref->jr_list.wk_mp, 8936 jremref->jr_ref.if_ino, 0, &inodedep) == 0) 8937 panic("journal_jremref: Lost inodedep"); 8938 LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps); 8939 TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps); 8940 add_to_journal(&jremref->jr_list); 8941} 8942 8943static void 8944dirrem_journal(dirrem, jremref, dotremref, dotdotremref) 8945 struct dirrem *dirrem; 8946 struct jremref *jremref; 8947 struct jremref *dotremref; 8948 struct jremref *dotdotremref; 8949{ 8950 struct inodedep *inodedep; 8951 8952 8953 if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0, 8954 &inodedep) == 0) 8955 panic("dirrem_journal: Lost inodedep"); 8956 journal_jremref(dirrem, jremref, inodedep); 8957 if (dotremref) 8958 journal_jremref(dirrem, dotremref, inodedep); 8959 if (dotdotremref) 8960 journal_jremref(dirrem, dotdotremref, NULL); 8961} 8962 8963/* 8964 * Allocate a new dirrem if appropriate and return it along with 8965 * its associated pagedep. Called without a lock, returns with lock. 8966 */ 8967static struct dirrem * 8968newdirrem(bp, dp, ip, isrmdir, prevdirremp) 8969 struct buf *bp; /* buffer containing directory block */ 8970 struct inode *dp; /* inode for the directory being modified */ 8971 struct inode *ip; /* inode for directory entry being removed */ 8972 int isrmdir; /* indicates if doing RMDIR */ 8973 struct dirrem **prevdirremp; /* previously referenced inode, if any */ 8974{ 8975 int offset; 8976 ufs_lbn_t lbn; 8977 struct diradd *dap; 8978 struct dirrem *dirrem; 8979 struct pagedep *pagedep; 8980 struct jremref *jremref; 8981 struct jremref *dotremref; 8982 struct jremref *dotdotremref; 8983 struct vnode *dvp; 8984 8985 /* 8986 * Whiteouts have no deletion dependencies. 
8987 */ 8988 if (ip == NULL) 8989 panic("newdirrem: whiteout"); 8990 dvp = ITOV(dp); 8991 /* 8992 * If we are over our limit, try to improve the situation. 8993 * Limiting the number of dirrem structures will also limit 8994 * the number of freefile and freeblks structures. 8995 */ 8996 ACQUIRE_LOCK(ip->i_ump); 8997 if (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2) 8998 (void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS); 8999 FREE_LOCK(ip->i_ump); 9000 dirrem = malloc(sizeof(struct dirrem), 9001 M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO); 9002 workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount); 9003 LIST_INIT(&dirrem->dm_jremrefhd); 9004 LIST_INIT(&dirrem->dm_jwork); 9005 dirrem->dm_state = isrmdir ? RMDIR : 0; 9006 dirrem->dm_oldinum = ip->i_number; 9007 *prevdirremp = NULL; 9008 /* 9009 * Allocate remove reference structures to track journal write 9010 * dependencies. We will always have one for the link and 9011 * when doing directories we will always have one more for dot. 9012 * When renaming a directory we skip the dotdot link change so 9013 * this is not needed. 9014 */ 9015 jremref = dotremref = dotdotremref = NULL; 9016 if (DOINGSUJ(dvp)) { 9017 if (isrmdir) { 9018 jremref = newjremref(dirrem, dp, ip, dp->i_offset, 9019 ip->i_effnlink + 2); 9020 dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET, 9021 ip->i_effnlink + 1); 9022 dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET, 9023 dp->i_effnlink + 1); 9024 dotdotremref->jr_state |= MKDIR_PARENT; 9025 } else 9026 jremref = newjremref(dirrem, dp, ip, dp->i_offset, 9027 ip->i_effnlink + 1); 9028 } 9029 ACQUIRE_LOCK(ip->i_ump); 9030 lbn = lblkno(dp->i_fs, dp->i_offset); 9031 offset = blkoff(dp->i_fs, dp->i_offset); 9032 pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC, 9033 &pagedep); 9034 dirrem->dm_pagedep = pagedep; 9035 dirrem->dm_offset = offset; 9036 /* 9037 * If we're renaming a .. link to a new directory, cancel any 9038 * existing MKDIR_PARENT mkdir. If it has already been canceled 9039 * the jremref is preserved for any potential diradd in this 9040 * location. This can not coincide with a rmdir. 9041 */ 9042 if (dp->i_offset == DOTDOT_OFFSET) { 9043 if (isrmdir) 9044 panic("newdirrem: .. directory change during remove?"); 9045 jremref = cancel_mkdir_dotdot(dp, dirrem, jremref); 9046 } 9047 /* 9048 * If we're removing a directory search for the .. dependency now and 9049 * cancel it. Any pending journal work will be added to the dirrem 9050 * to be completed when the workitem remove completes. 9051 */ 9052 if (isrmdir) 9053 dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref); 9054 /* 9055 * Check for a diradd dependency for the same directory entry. 9056 * If present, then both dependencies become obsolete and can 9057 * be de-allocated. 9058 */ 9059 dap = diradd_lookup(pagedep, offset); 9060 if (dap == NULL) { 9061 /* 9062 * Link the jremref structures into the dirrem so they are 9063 * written prior to the pagedep. 9064 */ 9065 if (jremref) 9066 dirrem_journal(dirrem, jremref, dotremref, 9067 dotdotremref); 9068 return (dirrem); 9069 } 9070 /* 9071 * Must be ATTACHED at this point. 
9072 */ 9073 if ((dap->da_state & ATTACHED) == 0) 9074 panic("newdirrem: not ATTACHED"); 9075 if (dap->da_newinum != ip->i_number) 9076 panic("newdirrem: inum %ju should be %ju", 9077 (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum); 9078 /* 9079 * If we are deleting a changed name that never made it to disk, 9080 * then return the dirrem describing the previous inode (which 9081 * represents the inode currently referenced from this entry on disk). 9082 */ 9083 if ((dap->da_state & DIRCHG) != 0) { 9084 *prevdirremp = dap->da_previous; 9085 dap->da_state &= ~DIRCHG; 9086 dap->da_pagedep = pagedep; 9087 } 9088 /* 9089 * We are deleting an entry that never made it to disk. 9090 * Mark it COMPLETE so we can delete its inode immediately. 9091 */ 9092 dirrem->dm_state |= COMPLETE; 9093 cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref); 9094#ifdef SUJ_DEBUG 9095 if (isrmdir == 0) { 9096 struct worklist *wk; 9097 9098 LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list) 9099 if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT)) 9100 panic("bad wk %p (0x%X)\n", wk, wk->wk_state); 9101 } 9102#endif 9103 9104 return (dirrem); 9105} 9106 9107/* 9108 * Directory entry change dependencies. 9109 * 9110 * Changing an existing directory entry requires that an add operation 9111 * be completed first followed by a deletion. The semantics for the addition 9112 * are identical to the description of adding a new entry above except 9113 * that the rollback is to the old inode number rather than zero. Once 9114 * the addition dependency is completed, the removal is done as described 9115 * in the removal routine above. 9116 */ 9117 9118/* 9119 * This routine should be called immediately after changing 9120 * a directory entry. The inode's link count should not be 9121 * decremented by the calling procedure -- the soft updates 9122 * code will perform this task when it is safe. 9123 */ 9124void 9125softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir) 9126 struct buf *bp; /* buffer containing directory block */ 9127 struct inode *dp; /* inode for the directory being modified */ 9128 struct inode *ip; /* inode for directory entry being removed */ 9129 ino_t newinum; /* new inode number for changed entry */ 9130 int isrmdir; /* indicates if doing RMDIR */ 9131{ 9132 int offset; 9133 struct diradd *dap = NULL; 9134 struct dirrem *dirrem, *prevdirrem; 9135 struct pagedep *pagedep; 9136 struct inodedep *inodedep; 9137 struct jaddref *jaddref; 9138 struct mount *mp; 9139 9140 offset = blkoff(dp->i_fs, dp->i_offset); 9141 mp = UFSTOVFS(dp->i_ump); 9142 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 9143 ("softdep_setup_directory_change called on non-softdep filesystem")); 9144 9145 /* 9146 * Whiteouts do not need diradd dependencies. 9147 */ 9148 if (newinum != WINO) { 9149 dap = malloc(sizeof(struct diradd), 9150 M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO); 9151 workitem_alloc(&dap->da_list, D_DIRADD, mp); 9152 dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE; 9153 dap->da_offset = offset; 9154 dap->da_newinum = newinum; 9155 LIST_INIT(&dap->da_jwork); 9156 } 9157 9158 /* 9159 * Allocate a new dirrem and ACQUIRE_LOCK. 
9160 */ 9161 dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem); 9162 pagedep = dirrem->dm_pagedep; 9163 /* 9164 * The possible values for isrmdir: 9165 * 0 - non-directory file rename 9166 * 1 - directory rename within same directory 9167 * inum - directory rename to new directory of given inode number 9168 * When renaming to a new directory, we are both deleting and 9169 * creating a new directory entry, so the link count on the new 9170 * directory should not change. Thus we do not need the followup 9171 * dirrem which is usually done in handle_workitem_remove. We set 9172 * the DIRCHG flag to tell handle_workitem_remove to skip the 9173 * followup dirrem. 9174 */ 9175 if (isrmdir > 1) 9176 dirrem->dm_state |= DIRCHG; 9177 9178 /* 9179 * Whiteouts have no additional dependencies, 9180 * so just put the dirrem on the correct list. 9181 */ 9182 if (newinum == WINO) { 9183 if ((dirrem->dm_state & COMPLETE) == 0) { 9184 LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem, 9185 dm_next); 9186 } else { 9187 dirrem->dm_dirinum = pagedep->pd_ino; 9188 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 9189 add_to_worklist(&dirrem->dm_list, 0); 9190 } 9191 FREE_LOCK(dp->i_ump); 9192 return; 9193 } 9194 /* 9195 * Add the dirrem to the inodedep's pending remove list for quick 9196 * discovery later. A valid nlinkdelta ensures that this lookup 9197 * will not fail. 9198 */ 9199 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) 9200 panic("softdep_setup_directory_change: Lost inodedep."); 9201 dirrem->dm_state |= ONDEPLIST; 9202 LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext); 9203 9204 /* 9205 * If the COMPLETE flag is clear, then there were no active 9206 * entries and we want to roll back to the previous inode until 9207 * the new inode is committed to disk. If the COMPLETE flag is 9208 * set, then we have deleted an entry that never made it to disk. 9209 * If the entry we deleted resulted from a name change, then the old 9210 * inode reference still resides on disk. Any rollback that we do 9211 * needs to be to that old inode (returned to us in prevdirrem). If 9212 * the entry we deleted resulted from a create, then there is 9213 * no entry on the disk, so we want to roll back to zero rather 9214 * than the uncommitted inode. In either of the COMPLETE cases we 9215 * want to immediately free the unwritten and unreferenced inode. 9216 */ 9217 if ((dirrem->dm_state & COMPLETE) == 0) { 9218 dap->da_previous = dirrem; 9219 } else { 9220 if (prevdirrem != NULL) { 9221 dap->da_previous = prevdirrem; 9222 } else { 9223 dap->da_state &= ~DIRCHG; 9224 dap->da_pagedep = pagedep; 9225 } 9226 dirrem->dm_dirinum = pagedep->pd_ino; 9227 if (LIST_EMPTY(&dirrem->dm_jremrefhd)) 9228 add_to_worklist(&dirrem->dm_list, 0); 9229 } 9230 /* 9231 * Lookup the jaddref for this journal entry. We must finish 9232 * initializing it and make the diradd write dependent on it. 9233 * If we're not journaling, put it on the id_bufwait list if the 9234 * inode is not yet written. If it is written, do the post-inode 9235 * write processing to put it on the id_pendinghd list. 
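 *
 * In outline (a reader's summary of the code below, not additional
 * semantics):
 *
 *	journaling (MOUNTEDSUJ)	-> bind dap to the tail jaddref and
 *				   queue the journal record
 *	inode fully written	-> pd_pendinghd and id_pendinghd
 *	otherwise		-> pd_diraddhd hash and id_bufwait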
 */
	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
	if (MOUNTEDSUJ(mp)) {
		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
		    inoreflst);
		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
		    ("softdep_setup_directory_change: bad jaddref %p",
		    jaddref));
		jaddref->ja_diroff = dp->i_offset;
		jaddref->ja_diradd = dap;
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		add_to_journal(&jaddref->ja_list);
	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
		dap->da_state |= COMPLETE;
		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
	} else {
		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
		    dap, da_pdlist);
		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
	}
	/*
	 * If we're making a new name for a directory that has not been
	 * committed, we need to move the dot and dotdot references to
	 * this new name.
	 */
	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
		merge_diradd(inodedep, dap);
	FREE_LOCK(dp->i_ump);
}

/*
 * Called whenever the link count on an inode is changed.
 * It creates an inode dependency so that the new reference(s)
 * to the inode cannot be committed to disk until the updated
 * inode has been written.
 */
void
softdep_change_linkcnt(ip)
	struct inode *ip;	/* the inode with the increased link count */
{
	struct inodedep *inodedep;
	int dflags;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_change_linkcnt called on non-softdep filesystem"));
	ACQUIRE_LOCK(ip->i_ump);
	dflags = DEPALLOC;
	if (IS_SNAPSHOT(ip))
		dflags |= NODELAY;
	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
	if (ip->i_nlink < ip->i_effnlink)
		panic("softdep_change_linkcnt: bad delta");
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	FREE_LOCK(ip->i_ump);
}

/*
 * Attach an sbdep dependency to the superblock buf so that we can keep
 * track of the head of the linked list of referenced but unlinked inodes.
 */
void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{
	struct sbdep *sbdep;
	struct worklist *wk;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
	LIST_FOREACH(wk, &bp->b_dep, wk_list)
		if (wk->wk_type == D_SBDEP)
			break;
	if (wk != NULL)
		return;
	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
	sbdep->sb_fs = fs;
	sbdep->sb_ump = ump;
	ACQUIRE_LOCK(ump);
	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
	FREE_LOCK(ump);
}

/*
 * Return the first unlinked inodedep which is ready to be the head of the
 * list.  The inodedep and all those after it must have valid next pointers.
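 *
 * A hedged illustration: walking backward from the tail, with '*'
 * marking entries whose on-disk next pointer (UNLINKNEXT) is written,
 *
 *	(head) A  B*  C*  D*  (tail)
 *
 * the routine returns B, the first entry of the valid suffix; it returns
 * NULL if the tail itself has no valid next pointer yet.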
9326 */ 9327static struct inodedep * 9328first_unlinked_inodedep(ump) 9329 struct ufsmount *ump; 9330{ 9331 struct inodedep *inodedep; 9332 struct inodedep *idp; 9333 9334 LOCK_OWNED(ump); 9335 for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst); 9336 inodedep; inodedep = idp) { 9337 if ((inodedep->id_state & UNLINKNEXT) == 0) 9338 return (NULL); 9339 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9340 if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0) 9341 break; 9342 if ((inodedep->id_state & UNLINKPREV) == 0) 9343 break; 9344 } 9345 return (inodedep); 9346} 9347 9348/* 9349 * Set the sujfree unlinked head pointer prior to writing a superblock. 9350 */ 9351static void 9352initiate_write_sbdep(sbdep) 9353 struct sbdep *sbdep; 9354{ 9355 struct inodedep *inodedep; 9356 struct fs *bpfs; 9357 struct fs *fs; 9358 9359 bpfs = sbdep->sb_fs; 9360 fs = sbdep->sb_ump->um_fs; 9361 inodedep = first_unlinked_inodedep(sbdep->sb_ump); 9362 if (inodedep) { 9363 fs->fs_sujfree = inodedep->id_ino; 9364 inodedep->id_state |= UNLINKPREV; 9365 } else 9366 fs->fs_sujfree = 0; 9367 bpfs->fs_sujfree = fs->fs_sujfree; 9368} 9369 9370/* 9371 * After a superblock is written determine whether it must be written again 9372 * due to a changing unlinked list head. 9373 */ 9374static int 9375handle_written_sbdep(sbdep, bp) 9376 struct sbdep *sbdep; 9377 struct buf *bp; 9378{ 9379 struct inodedep *inodedep; 9380 struct mount *mp; 9381 struct fs *fs; 9382 9383 LOCK_OWNED(sbdep->sb_ump); 9384 fs = sbdep->sb_fs; 9385 mp = UFSTOVFS(sbdep->sb_ump); 9386 /* 9387 * If the superblock doesn't match the in-memory list start over. 9388 */ 9389 inodedep = first_unlinked_inodedep(sbdep->sb_ump); 9390 if ((inodedep && fs->fs_sujfree != inodedep->id_ino) || 9391 (inodedep == NULL && fs->fs_sujfree != 0)) { 9392 bdirty(bp); 9393 return (1); 9394 } 9395 WORKITEM_FREE(sbdep, D_SBDEP); 9396 if (fs->fs_sujfree == 0) 9397 return (0); 9398 /* 9399 * Now that we have a record of this inode in stable store allow it 9400 * to be written to free up pending work. Inodes may see a lot of 9401 * write activity after they are unlinked which we must not hold up. 9402 */ 9403 for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) { 9404 if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS) 9405 panic("handle_written_sbdep: Bad inodedep %p (0x%X)", 9406 inodedep, inodedep->id_state); 9407 if (inodedep->id_state & UNLINKONLIST) 9408 break; 9409 inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST; 9410 } 9411 9412 return (0); 9413} 9414 9415/* 9416 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list. 9417 */ 9418static void 9419unlinked_inodedep(mp, inodedep) 9420 struct mount *mp; 9421 struct inodedep *inodedep; 9422{ 9423 struct ufsmount *ump; 9424 9425 ump = VFSTOUFS(mp); 9426 LOCK_OWNED(ump); 9427 if (MOUNTEDSUJ(mp) == 0) 9428 return; 9429 ump->um_fs->fs_fmod = 1; 9430 if (inodedep->id_state & UNLINKED) 9431 panic("unlinked_inodedep: %p already unlinked\n", inodedep); 9432 inodedep->id_state |= UNLINKED; 9433 TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked); 9434} 9435 9436/* 9437 * Remove an inodedep from the unlinked inodedep list. This may require 9438 * disk writes if the inode has made it that far. 
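 *
 * For orientation (an informal sketch, not original commentary): on disk
 * the unlinked inodes form a singly linked chain rooted in the
 * superblock,
 *
 *	fs_sujfree -> dinode(A).di_freelink -> dinode(B).di_freelink -> 0
 *
 * so unhooking an inode that has already been linked means rewriting the
 * superblock or the predecessor's dinode, which is what the buffer
 * handling below is about.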
9439 */ 9440static void 9441clear_unlinked_inodedep(inodedep) 9442 struct inodedep *inodedep; 9443{ 9444 struct ufsmount *ump; 9445 struct inodedep *idp; 9446 struct inodedep *idn; 9447 struct fs *fs; 9448 struct buf *bp; 9449 ino_t ino; 9450 ino_t nino; 9451 ino_t pino; 9452 int error; 9453 9454 ump = VFSTOUFS(inodedep->id_list.wk_mp); 9455 fs = ump->um_fs; 9456 ino = inodedep->id_ino; 9457 error = 0; 9458 for (;;) { 9459 LOCK_OWNED(ump); 9460 KASSERT((inodedep->id_state & UNLINKED) != 0, 9461 ("clear_unlinked_inodedep: inodedep %p not unlinked", 9462 inodedep)); 9463 /* 9464 * If nothing has yet been written simply remove us from 9465 * the in memory list and return. This is the most common 9466 * case where handle_workitem_remove() loses the final 9467 * reference. 9468 */ 9469 if ((inodedep->id_state & UNLINKLINKS) == 0) 9470 break; 9471 /* 9472 * If we have a NEXT pointer and no PREV pointer we can simply 9473 * clear NEXT's PREV and remove ourselves from the list. Be 9474 * careful not to clear PREV if the superblock points at 9475 * next as well. 9476 */ 9477 idn = TAILQ_NEXT(inodedep, id_unlinked); 9478 if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) { 9479 if (idn && fs->fs_sujfree != idn->id_ino) 9480 idn->id_state &= ~UNLINKPREV; 9481 break; 9482 } 9483 /* 9484 * Here we have an inodedep which is actually linked into 9485 * the list. We must remove it by forcing a write to the 9486 * link before us, whether it be the superblock or an inode. 9487 * Unfortunately the list may change while we're waiting 9488 * on the buf lock for either resource so we must loop until 9489 * we lock the right one. If both the superblock and an 9490 * inode point to this inode we must clear the inode first 9491 * followed by the superblock. 9492 */ 9493 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9494 pino = 0; 9495 if (idp && (idp->id_state & UNLINKNEXT)) 9496 pino = idp->id_ino; 9497 FREE_LOCK(ump); 9498 if (pino == 0) { 9499 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 9500 (int)fs->fs_sbsize, 0, 0, 0); 9501 } else { 9502 error = bread(ump->um_devvp, 9503 fsbtodb(fs, ino_to_fsba(fs, pino)), 9504 (int)fs->fs_bsize, NOCRED, &bp); 9505 if (error) 9506 brelse(bp); 9507 } 9508 ACQUIRE_LOCK(ump); 9509 if (error) 9510 break; 9511 /* If the list has changed restart the loop. */ 9512 idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked); 9513 nino = 0; 9514 if (idp && (idp->id_state & UNLINKNEXT)) 9515 nino = idp->id_ino; 9516 if (nino != pino || 9517 (inodedep->id_state & UNLINKPREV) != UNLINKPREV) { 9518 FREE_LOCK(ump); 9519 brelse(bp); 9520 ACQUIRE_LOCK(ump); 9521 continue; 9522 } 9523 nino = 0; 9524 idn = TAILQ_NEXT(inodedep, id_unlinked); 9525 if (idn) 9526 nino = idn->id_ino; 9527 /* 9528 * Remove us from the in memory list. After this we cannot 9529 * access the inodedep. 9530 */ 9531 KASSERT((inodedep->id_state & UNLINKED) != 0, 9532 ("clear_unlinked_inodedep: inodedep %p not unlinked", 9533 inodedep)); 9534 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); 9535 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); 9536 FREE_LOCK(ump); 9537 /* 9538 * The predecessor's next pointer is manually updated here 9539 * so that the NEXT flag is never cleared for an element 9540 * that is in the list. 
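 *
 * Concretely (a hedged summary of the writes below), with predecessor P
 * and successor N of the inode being removed:
 *
 *	P is the superblock	-> copy fs into the buf and track it with
 *				   softdep_setup_sbupdate()
 *	P is an inode		-> dinode(P).di_freelink = N (UFS1 or UFS2)
 *
 * followed by a synchronous bwrite(), since there is no recourse if that
 * write fails.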
9541 */ 9542 if (pino == 0) { 9543 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 9544 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 9545 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, 9546 bp); 9547 } else if (fs->fs_magic == FS_UFS1_MAGIC) 9548 ((struct ufs1_dinode *)bp->b_data + 9549 ino_to_fsbo(fs, pino))->di_freelink = nino; 9550 else 9551 ((struct ufs2_dinode *)bp->b_data + 9552 ino_to_fsbo(fs, pino))->di_freelink = nino; 9553 /* 9554 * If the bwrite fails we have no recourse to recover. The 9555 * filesystem is corrupted already. 9556 */ 9557 bwrite(bp); 9558 ACQUIRE_LOCK(ump); 9559 /* 9560 * If the superblock pointer still needs to be cleared force 9561 * a write here. 9562 */ 9563 if (fs->fs_sujfree == ino) { 9564 FREE_LOCK(ump); 9565 bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc), 9566 (int)fs->fs_sbsize, 0, 0, 0); 9567 bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize); 9568 ffs_oldfscompat_write((struct fs *)bp->b_data, ump); 9569 softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, 9570 bp); 9571 bwrite(bp); 9572 ACQUIRE_LOCK(ump); 9573 } 9574 9575 if (fs->fs_sujfree != ino) 9576 return; 9577 panic("clear_unlinked_inodedep: Failed to clear free head"); 9578 } 9579 if (inodedep->id_ino == fs->fs_sujfree) 9580 panic("clear_unlinked_inodedep: Freeing head of free list"); 9581 inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST); 9582 TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked); 9583 return; 9584} 9585 9586/* 9587 * This workitem decrements the inode's link count. 9588 * If the link count reaches zero, the file is removed. 9589 */ 9590static int 9591handle_workitem_remove(dirrem, flags) 9592 struct dirrem *dirrem; 9593 int flags; 9594{ 9595 struct inodedep *inodedep; 9596 struct workhead dotdotwk; 9597 struct worklist *wk; 9598 struct ufsmount *ump; 9599 struct mount *mp; 9600 struct vnode *vp; 9601 struct inode *ip; 9602 ino_t oldinum; 9603 9604 if (dirrem->dm_state & ONWORKLIST) 9605 panic("handle_workitem_remove: dirrem %p still on worklist", 9606 dirrem); 9607 oldinum = dirrem->dm_oldinum; 9608 mp = dirrem->dm_list.wk_mp; 9609 ump = VFSTOUFS(mp); 9610 flags |= LK_EXCLUSIVE; 9611 if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0) 9612 return (EBUSY); 9613 ip = VTOI(vp); 9614 ACQUIRE_LOCK(ump); 9615 if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0) 9616 panic("handle_workitem_remove: lost inodedep"); 9617 if (dirrem->dm_state & ONDEPLIST) 9618 LIST_REMOVE(dirrem, dm_inonext); 9619 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 9620 ("handle_workitem_remove: Journal entries not written.")); 9621 9622 /* 9623 * Move all dependencies waiting on the remove to complete 9624 * from the dirrem to the inode inowait list to be completed 9625 * after the inode has been updated and written to disk. Any 9626 * marked MKDIR_PARENT are saved to be completed when the .. ref 9627 * is removed. 9628 */ 9629 LIST_INIT(&dotdotwk); 9630 while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) { 9631 WORKLIST_REMOVE(wk); 9632 if (wk->wk_state & MKDIR_PARENT) { 9633 wk->wk_state &= ~MKDIR_PARENT; 9634 WORKLIST_INSERT(&dotdotwk, wk); 9635 continue; 9636 } 9637 WORKLIST_INSERT(&inodedep->id_inowait, wk); 9638 } 9639 LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list); 9640 /* 9641 * Normal file deletion. 
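 *
 * (Editor's summary of the non-RMDIR path below: exactly one link is
 * dropped, i_nlink--, and the inode joins the in-memory unlinked list
 * only when the count reaches zero.)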
 */
	if ((dirrem->dm_state & RMDIR) == 0) {
		ip->i_nlink--;
		DIP_SET(ip, i_nlink, ip->i_nlink);
		ip->i_flag |= IN_CHANGE;
		if (ip->i_nlink < ip->i_effnlink)
			panic("handle_workitem_remove: bad file delta");
		if (ip->i_nlink == 0)
			unlinked_inodedep(mp, inodedep);
		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: worklist not empty. %s",
		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	/*
	 * Directory deletion. Decrement reference count for both the
	 * just deleted parent directory entry and the reference for ".".
	 * Arrange to have the reference count on the parent decremented
	 * to account for the loss of "..".
	 */
	ip->i_nlink -= 2;
	DIP_SET(ip, i_nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	if (ip->i_nlink < ip->i_effnlink)
		panic("handle_workitem_remove: bad dir delta");
	if (ip->i_nlink == 0)
		unlinked_inodedep(mp, inodedep);
	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
	/*
	 * Rename a directory to a new parent. Since we are both deleting
	 * and creating a new directory entry, the link count on the new
	 * directory should not change. Thus we skip the followup dirrem.
	 */
	if (dirrem->dm_state & DIRCHG) {
		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
		WORKITEM_FREE(dirrem, D_DIRREM);
		FREE_LOCK(ump);
		goto out;
	}
	dirrem->dm_state = ONDEPLIST;
	dirrem->dm_oldinum = dirrem->dm_dirinum;
	/*
	 * Place the dirrem on the parent's dirremhd list.
	 */
	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
		panic("handle_workitem_remove: lost dir inodedep");
	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
	/*
	 * If the allocated inode has never been written to disk, then
	 * the on-disk inode is zero'ed and we can remove the file
	 * immediately.  When journaling, if the inode has been marked
	 * unlinked and not DEPCOMPLETE we know it can never be written.
	 */
	inodedep_lookup(mp, oldinum, 0, &inodedep);
	if (inodedep == NULL ||
	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
	    check_inode_unwritten(inodedep)) {
		FREE_LOCK(ump);
		vput(vp);
		return (handle_workitem_remove(dirrem, flags));
	}
	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
	FREE_LOCK(ump);
	ip->i_flag |= IN_CHANGE;
out:
	ffs_update(vp, 0);
	vput(vp);
	return (0);
}

/*
 * Inode de-allocation dependencies.
 *
 * When an inode's link count is reduced to zero, it can be de-allocated. We
 * found it convenient to postpone de-allocation until after the inode is
 * written to disk with its new link count (zero).  At this point, all of the
 * on-disk inode's block pointers are nullified and, with careful dependency
 * list ordering, all dependencies related to the inode will be satisfied and
 * the corresponding dependency structures de-allocated.  So, if/when the
 * inode is reused, there will be no mixing of old dependencies with new
 * ones.  This artificial dependency is set up by the block de-allocation
 * procedure above (softdep_setup_freeblocks) and completed by the
 * following procedure.
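 *
 * As a hedged outline of the ordering this arrangement guarantees:
 *
 *	1. the link count reaches zero and the inode is written with
 *	   di_nlink == 0;
 *	2. the on-disk inode's block pointers are nullified;
 *	3. only then does handle_workitem_freefile() call ffs_freefile().
 *
 * A crash at any point thus leaves at worst an allocated but
 * unreferenced inode for fsck (or the journal) to reclaim.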
9729 */ 9730static void 9731handle_workitem_freefile(freefile) 9732 struct freefile *freefile; 9733{ 9734 struct workhead wkhd; 9735 struct fs *fs; 9736 struct inodedep *idp; 9737 struct ufsmount *ump; 9738 int error; 9739 9740 ump = VFSTOUFS(freefile->fx_list.wk_mp); 9741 fs = ump->um_fs; 9742#ifdef DEBUG 9743 ACQUIRE_LOCK(ump); 9744 error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp); 9745 FREE_LOCK(ump); 9746 if (error) 9747 panic("handle_workitem_freefile: inodedep %p survived", idp); 9748#endif 9749 UFS_LOCK(ump); 9750 fs->fs_pendinginodes -= 1; 9751 UFS_UNLOCK(ump); 9752 LIST_INIT(&wkhd); 9753 LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list); 9754 if ((error = ffs_freefile(ump, fs, freefile->fx_devvp, 9755 freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0) 9756 softdep_error("handle_workitem_freefile", error); 9757 ACQUIRE_LOCK(ump); 9758 WORKITEM_FREE(freefile, D_FREEFILE); 9759 FREE_LOCK(ump); 9760} 9761 9762 9763/* 9764 * Helper function which unlinks marker element from work list and returns 9765 * the next element on the list. 9766 */ 9767static __inline struct worklist * 9768markernext(struct worklist *marker) 9769{ 9770 struct worklist *next; 9771 9772 next = LIST_NEXT(marker, wk_list); 9773 LIST_REMOVE(marker, wk_list); 9774 return next; 9775} 9776 9777/* 9778 * Disk writes. 9779 * 9780 * The dependency structures constructed above are most actively used when file 9781 * system blocks are written to disk. No constraints are placed on when a 9782 * block can be written, but unsatisfied update dependencies are made safe by 9783 * modifying (or replacing) the source memory for the duration of the disk 9784 * write. When the disk write completes, the memory block is again brought 9785 * up-to-date. 9786 * 9787 * In-core inode structure reclamation. 9788 * 9789 * Because there are a finite number of "in-core" inode structures, they are 9790 * reused regularly. By transferring all inode-related dependencies to the 9791 * in-memory inode block and indexing them separately (via "inodedep"s), we 9792 * can allow "in-core" inode structures to be reused at any time and avoid 9793 * any increase in contention. 9794 * 9795 * Called just before entering the device driver to initiate a new disk I/O. 9796 * The buffer must be locked, thus, no I/O completion operations can occur 9797 * while we are manipulating its associated dependencies. 9798 */ 9799static void 9800softdep_disk_io_initiation(bp) 9801 struct buf *bp; /* structure describing disk write to occur */ 9802{ 9803 struct worklist *wk; 9804 struct worklist marker; 9805 struct inodedep *inodedep; 9806 struct freeblks *freeblks; 9807 struct jblkdep *jblkdep; 9808 struct newblk *newblk; 9809 struct ufsmount *ump; 9810 9811 /* 9812 * We only care about write operations. There should never 9813 * be dependencies for reads. 9814 */ 9815 if (bp->b_iocmd != BIO_WRITE) 9816 panic("softdep_disk_io_initiation: not write"); 9817 9818 if (bp->b_vflags & BV_BKGRDINPROG) 9819 panic("softdep_disk_io_initiation: Writing buffer with " 9820 "background write in progress: %p", bp); 9821 9822 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 9823 return; 9824 ump = VFSTOUFS(wk->wk_mp); 9825 9826 marker.wk_type = D_LAST + 1; /* Not a normal workitem */ 9827 PHOLD(curproc); /* Don't swap out kernel stack */ 9828 ACQUIRE_LOCK(ump); 9829 /* 9830 * Do any necessary pre-I/O processing. 
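 *
 * (Editor's sketch of the traversal below: 'marker' acts as a cursor so
 * the dependency list can change while it is walked --
 *
 *	LIST_INSERT_AFTER(wk, &marker, ...);	-- pin our place
 *	... process wk ...
 *	wk = markernext(&marker);		-- unlink marker, advance
 *
 * which is why some cases re-insert the marker before wk to revisit it.)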
 */
	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
	     wk = markernext(&marker)) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		switch (wk->wk_type) {

		case D_PAGEDEP:
			initiate_write_filepage(WK_PAGEDEP(wk), bp);
			continue;

		case D_INODEDEP:
			inodedep = WK_INODEDEP(wk);
			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
				initiate_write_inodeblock_ufs1(inodedep, bp);
			else
				initiate_write_inodeblock_ufs2(inodedep, bp);
			continue;

		case D_INDIRDEP:
			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
			continue;

		case D_BMSAFEMAP:
			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
			continue;

		case D_JSEG:
			WK_JSEG(wk)->js_buf = NULL;
			continue;

		case D_FREEBLKS:
			freeblks = WK_FREEBLKS(wk);
			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
			/*
			 * We have to wait for the freeblks to be journaled
			 * before we can write an inodeblock with updated
			 * pointers.  Be careful to arrange the marker so
			 * we revisit the freeblks if it's not removed by
			 * the first jwait().
			 */
			if (jblkdep != NULL) {
				LIST_REMOVE(&marker, wk_list);
				LIST_INSERT_BEFORE(wk, &marker, wk_list);
				jwait(&jblkdep->jb_list, MNT_WAIT);
			}
			continue;
		case D_ALLOCDIRECT:
		case D_ALLOCINDIR:
			/*
			 * We have to wait for the jnewblk to be journaled
			 * before we can write to a block if the contents
			 * may be confused with an earlier file's indirect
			 * at recovery time.  Handle the marker as described
			 * above.
			 */
			newblk = WK_NEWBLK(wk);
			if (newblk->nb_jnewblk != NULL &&
			    indirblk_lookup(newblk->nb_list.wk_mp,
			    newblk->nb_newblkno)) {
				LIST_REMOVE(&marker, wk_list);
				LIST_INSERT_BEFORE(wk, &marker, wk_list);
				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
			}
			continue;

		case D_SBDEP:
			initiate_write_sbdep(WK_SBDEP(wk));
			continue;

		case D_MKDIR:
		case D_FREEWORK:
		case D_FREEDEP:
		case D_JSEGDEP:
			continue;

		default:
			panic("softdep_disk_io_initiation: Unexpected type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	FREE_LOCK(ump);
	PRELE(curproc);			/* Allow swapout of kernel stack */
}

/*
 * Called from within the procedure above to deal with unsatisfied
 * allocation dependencies in a directory.  The buffer must be locked,
 * thus, no I/O completion operations can occur while we are
 * manipulating its associated dependencies.
 */
static void
initiate_write_filepage(pagedep, bp)
	struct pagedep *pagedep;
	struct buf *bp;
{
	struct jremref *jremref;
	struct jmvref *jmvref;
	struct dirrem *dirrem;
	struct diradd *dap;
	struct direct *ep;
	int i;

	if (pagedep->pd_state & IOSTARTED) {
		/*
		 * This can only happen if there is a driver that does not
		 * understand chaining.  Here biodone will reissue the call
		 * to strategy for the incomplete buffers.
		 */
		printf("initiate_write_filepage: already started\n");
		return;
	}
	pagedep->pd_state |= IOSTARTED;
	/*
	 * Wait for all journal remove dependencies to hit the disk.
	 * We cannot allow any potentially conflicting directory adds
	 * to be visible before removes, and rollback is too difficult.
	 * The soft updates lock may be dropped and re-acquired, however
	 * we hold the buf locked so the dependency cannot go away.
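 *
 * (A restatement of the rollback loop that follows, hedged: each
 * unsatisfied diradd found in the pd_diraddhd hash chains has its
 * directory entry undone in place,
 *
 *	ep->d_ino = (DIRCHG set) ? previous on-disk inode : 0;
 *
 * and the diradd moves from ATTACHED to UNDONE for the duration of the
 * write.)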
9950 */ 9951 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) 9952 while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) 9953 jwait(&jremref->jr_list, MNT_WAIT); 9954 while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) 9955 jwait(&jmvref->jm_list, MNT_WAIT); 9956 for (i = 0; i < DAHASHSZ; i++) { 9957 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 9958 ep = (struct direct *) 9959 ((char *)bp->b_data + dap->da_offset); 9960 if (ep->d_ino != dap->da_newinum) 9961 panic("%s: dir inum %ju != new %ju", 9962 "initiate_write_filepage", 9963 (uintmax_t)ep->d_ino, 9964 (uintmax_t)dap->da_newinum); 9965 if (dap->da_state & DIRCHG) 9966 ep->d_ino = dap->da_previous->dm_oldinum; 9967 else 9968 ep->d_ino = 0; 9969 dap->da_state &= ~ATTACHED; 9970 dap->da_state |= UNDONE; 9971 } 9972 } 9973} 9974 9975/* 9976 * Version of initiate_write_inodeblock that handles UFS1 dinodes. 9977 * Note that any bug fixes made to this routine must be done in the 9978 * version found below. 9979 * 9980 * Called from within the procedure above to deal with unsatisfied 9981 * allocation dependencies in an inodeblock. The buffer must be 9982 * locked, thus, no I/O completion operations can occur while we 9983 * are manipulating its associated dependencies. 9984 */ 9985static void 9986initiate_write_inodeblock_ufs1(inodedep, bp) 9987 struct inodedep *inodedep; 9988 struct buf *bp; /* The inode block */ 9989{ 9990 struct allocdirect *adp, *lastadp; 9991 struct ufs1_dinode *dp; 9992 struct ufs1_dinode *sip; 9993 struct inoref *inoref; 9994 struct ufsmount *ump; 9995 struct fs *fs; 9996 ufs_lbn_t i; 9997#ifdef INVARIANTS 9998 ufs_lbn_t prevlbn = 0; 9999#endif 10000 int deplist; 10001 10002 if (inodedep->id_state & IOSTARTED) 10003 panic("initiate_write_inodeblock_ufs1: already started"); 10004 inodedep->id_state |= IOSTARTED; 10005 fs = inodedep->id_fs; 10006 ump = VFSTOUFS(inodedep->id_list.wk_mp); 10007 LOCK_OWNED(ump); 10008 dp = (struct ufs1_dinode *)bp->b_data + 10009 ino_to_fsbo(fs, inodedep->id_ino); 10010 10011 /* 10012 * If we're on the unlinked list but have not yet written our 10013 * next pointer initialize it here. 10014 */ 10015 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { 10016 struct inodedep *inon; 10017 10018 inon = TAILQ_NEXT(inodedep, id_unlinked); 10019 dp->di_freelink = inon ? inon->id_ino : 0; 10020 } 10021 /* 10022 * If the bitmap is not yet written, then the allocated 10023 * inode cannot be written to disk. 10024 */ 10025 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 10026 if (inodedep->id_savedino1 != NULL) 10027 panic("initiate_write_inodeblock_ufs1: I/O underway"); 10028 FREE_LOCK(ump); 10029 sip = malloc(sizeof(struct ufs1_dinode), 10030 M_SAVEDINO, M_SOFTDEP_FLAGS); 10031 ACQUIRE_LOCK(ump); 10032 inodedep->id_savedino1 = sip; 10033 *inodedep->id_savedino1 = *dp; 10034 bzero((caddr_t)dp, sizeof(struct ufs1_dinode)); 10035 dp->di_gen = inodedep->id_savedino1->di_gen; 10036 dp->di_freelink = inodedep->id_savedino1->di_freelink; 10037 return; 10038 } 10039 /* 10040 * If no dependencies, then there is nothing to roll back. 10041 */ 10042 inodedep->id_savedsize = dp->di_size; 10043 inodedep->id_savedextsize = 0; 10044 inodedep->id_savednlink = dp->di_nlink; 10045 if (TAILQ_EMPTY(&inodedep->id_inoupdt) && 10046 TAILQ_EMPTY(&inodedep->id_inoreflst)) 10047 return; 10048 /* 10049 * Revert the link count to that of the first unwritten journal entry. 
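 *
 * (Editor's reading of the two lines below, offered tentatively: if any
 * inode reference journal entry is still unwritten, di_nlink is held
 * back to that entry's if_nlink, so the on-disk inode never claims
 * links the journal has not yet recorded.)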
10050 */ 10051 inoref = TAILQ_FIRST(&inodedep->id_inoreflst); 10052 if (inoref) 10053 dp->di_nlink = inoref->if_nlink; 10054 /* 10055 * Set the dependencies to busy. 10056 */ 10057 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10058 adp = TAILQ_NEXT(adp, ad_next)) { 10059#ifdef INVARIANTS 10060 if (deplist != 0 && prevlbn >= adp->ad_offset) 10061 panic("softdep_write_inodeblock: lbn order"); 10062 prevlbn = adp->ad_offset; 10063 if (adp->ad_offset < NDADDR && 10064 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 10065 panic("%s: direct pointer #%jd mismatch %d != %jd", 10066 "softdep_write_inodeblock", 10067 (intmax_t)adp->ad_offset, 10068 dp->di_db[adp->ad_offset], 10069 (intmax_t)adp->ad_newblkno); 10070 if (adp->ad_offset >= NDADDR && 10071 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno) 10072 panic("%s: indirect pointer #%jd mismatch %d != %jd", 10073 "softdep_write_inodeblock", 10074 (intmax_t)adp->ad_offset - NDADDR, 10075 dp->di_ib[adp->ad_offset - NDADDR], 10076 (intmax_t)adp->ad_newblkno); 10077 deplist |= 1 << adp->ad_offset; 10078 if ((adp->ad_state & ATTACHED) == 0) 10079 panic("softdep_write_inodeblock: Unknown state 0x%x", 10080 adp->ad_state); 10081#endif /* INVARIANTS */ 10082 adp->ad_state &= ~ATTACHED; 10083 adp->ad_state |= UNDONE; 10084 } 10085 /* 10086 * The on-disk inode cannot claim to be any larger than the last 10087 * fragment that has been written. Otherwise, the on-disk inode 10088 * might have fragments that were not the last block in the file 10089 * which would corrupt the filesystem. 10090 */ 10091 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10092 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10093 if (adp->ad_offset >= NDADDR) 10094 break; 10095 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 10096 /* keep going until hitting a rollback to a frag */ 10097 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10098 continue; 10099 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10100 for (i = adp->ad_offset + 1; i < NDADDR; i++) { 10101#ifdef INVARIANTS 10102 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 10103 panic("softdep_write_inodeblock: lost dep1"); 10104#endif /* INVARIANTS */ 10105 dp->di_db[i] = 0; 10106 } 10107 for (i = 0; i < NIADDR; i++) { 10108#ifdef INVARIANTS 10109 if (dp->di_ib[i] != 0 && 10110 (deplist & ((1 << NDADDR) << i)) == 0) 10111 panic("softdep_write_inodeblock: lost dep2"); 10112#endif /* INVARIANTS */ 10113 dp->di_ib[i] = 0; 10114 } 10115 return; 10116 } 10117 /* 10118 * If we have zero'ed out the last allocated block of the file, 10119 * roll back the size to the last currently allocated block. 10120 * We know that this last allocated block is a full-sized as 10121 * we already checked for fragments in the loop above. 10122 */ 10123 if (lastadp != NULL && 10124 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10125 for (i = lastadp->ad_offset; i >= 0; i--) 10126 if (dp->di_db[i] != 0) 10127 break; 10128 dp->di_size = (i + 1) * fs->fs_bsize; 10129 } 10130 /* 10131 * The only dependencies are for indirect blocks. 10132 * 10133 * The file size for indirect block additions is not guaranteed. 10134 * Such a guarantee would be non-trivial to achieve. The conventional 10135 * synchronous write implementation also does not make this guarantee. 10136 * Fsck should catch and fix discrepancies. Arguably, the file size 10137 * can be over-estimated without destroying integrity when the file 10138 * moves into the indirect blocks (i.e., is large). 
If we want to 10139 * postpone fsck, we are stuck with this argument. 10140 */ 10141 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 10142 dp->di_ib[adp->ad_offset - NDADDR] = 0; 10143} 10144 10145/* 10146 * Version of initiate_write_inodeblock that handles UFS2 dinodes. 10147 * Note that any bug fixes made to this routine must be done in the 10148 * version found above. 10149 * 10150 * Called from within the procedure above to deal with unsatisfied 10151 * allocation dependencies in an inodeblock. The buffer must be 10152 * locked, thus, no I/O completion operations can occur while we 10153 * are manipulating its associated dependencies. 10154 */ 10155static void 10156initiate_write_inodeblock_ufs2(inodedep, bp) 10157 struct inodedep *inodedep; 10158 struct buf *bp; /* The inode block */ 10159{ 10160 struct allocdirect *adp, *lastadp; 10161 struct ufs2_dinode *dp; 10162 struct ufs2_dinode *sip; 10163 struct inoref *inoref; 10164 struct ufsmount *ump; 10165 struct fs *fs; 10166 ufs_lbn_t i; 10167#ifdef INVARIANTS 10168 ufs_lbn_t prevlbn = 0; 10169#endif 10170 int deplist; 10171 10172 if (inodedep->id_state & IOSTARTED) 10173 panic("initiate_write_inodeblock_ufs2: already started"); 10174 inodedep->id_state |= IOSTARTED; 10175 fs = inodedep->id_fs; 10176 ump = VFSTOUFS(inodedep->id_list.wk_mp); 10177 LOCK_OWNED(ump); 10178 dp = (struct ufs2_dinode *)bp->b_data + 10179 ino_to_fsbo(fs, inodedep->id_ino); 10180 10181 /* 10182 * If we're on the unlinked list but have not yet written our 10183 * next pointer initialize it here. 10184 */ 10185 if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) { 10186 struct inodedep *inon; 10187 10188 inon = TAILQ_NEXT(inodedep, id_unlinked); 10189 dp->di_freelink = inon ? inon->id_ino : 0; 10190 } 10191 /* 10192 * If the bitmap is not yet written, then the allocated 10193 * inode cannot be written to disk. 10194 */ 10195 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 10196 if (inodedep->id_savedino2 != NULL) 10197 panic("initiate_write_inodeblock_ufs2: I/O underway"); 10198 FREE_LOCK(ump); 10199 sip = malloc(sizeof(struct ufs2_dinode), 10200 M_SAVEDINO, M_SOFTDEP_FLAGS); 10201 ACQUIRE_LOCK(ump); 10202 inodedep->id_savedino2 = sip; 10203 *inodedep->id_savedino2 = *dp; 10204 bzero((caddr_t)dp, sizeof(struct ufs2_dinode)); 10205 dp->di_gen = inodedep->id_savedino2->di_gen; 10206 dp->di_freelink = inodedep->id_savedino2->di_freelink; 10207 return; 10208 } 10209 /* 10210 * If no dependencies, then there is nothing to roll back. 10211 */ 10212 inodedep->id_savedsize = dp->di_size; 10213 inodedep->id_savedextsize = dp->di_extsize; 10214 inodedep->id_savednlink = dp->di_nlink; 10215 if (TAILQ_EMPTY(&inodedep->id_inoupdt) && 10216 TAILQ_EMPTY(&inodedep->id_extupdt) && 10217 TAILQ_EMPTY(&inodedep->id_inoreflst)) 10218 return; 10219 /* 10220 * Revert the link count to that of the first unwritten journal entry. 10221 */ 10222 inoref = TAILQ_FIRST(&inodedep->id_inoreflst); 10223 if (inoref) 10224 dp->di_nlink = inoref->if_nlink; 10225 10226 /* 10227 * Set the ext data dependencies to busy. 
10228 */ 10229 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 10230 adp = TAILQ_NEXT(adp, ad_next)) { 10231#ifdef INVARIANTS 10232 if (deplist != 0 && prevlbn >= adp->ad_offset) 10233 panic("softdep_write_inodeblock: lbn order"); 10234 prevlbn = adp->ad_offset; 10235 if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno) 10236 panic("%s: direct pointer #%jd mismatch %jd != %jd", 10237 "softdep_write_inodeblock", 10238 (intmax_t)adp->ad_offset, 10239 (intmax_t)dp->di_extb[adp->ad_offset], 10240 (intmax_t)adp->ad_newblkno); 10241 deplist |= 1 << adp->ad_offset; 10242 if ((adp->ad_state & ATTACHED) == 0) 10243 panic("softdep_write_inodeblock: Unknown state 0x%x", 10244 adp->ad_state); 10245#endif /* INVARIANTS */ 10246 adp->ad_state &= ~ATTACHED; 10247 adp->ad_state |= UNDONE; 10248 } 10249 /* 10250 * The on-disk inode cannot claim to be any larger than the last 10251 * fragment that has been written. Otherwise, the on-disk inode 10252 * might have fragments that were not the last block in the ext 10253 * data which would corrupt the filesystem. 10254 */ 10255 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; 10256 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10257 dp->di_extb[adp->ad_offset] = adp->ad_oldblkno; 10258 /* keep going until hitting a rollback to a frag */ 10259 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10260 continue; 10261 dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10262 for (i = adp->ad_offset + 1; i < NXADDR; i++) { 10263#ifdef INVARIANTS 10264 if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0) 10265 panic("softdep_write_inodeblock: lost dep1"); 10266#endif /* INVARIANTS */ 10267 dp->di_extb[i] = 0; 10268 } 10269 lastadp = NULL; 10270 break; 10271 } 10272 /* 10273 * If we have zero'ed out the last allocated block of the ext 10274 * data, roll back the size to the last currently allocated block. 10275 * We know that this last allocated block is a full-sized as 10276 * we already checked for fragments in the loop above. 10277 */ 10278 if (lastadp != NULL && 10279 dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10280 for (i = lastadp->ad_offset; i >= 0; i--) 10281 if (dp->di_extb[i] != 0) 10282 break; 10283 dp->di_extsize = (i + 1) * fs->fs_bsize; 10284 } 10285 /* 10286 * Set the file data dependencies to busy. 
10287 */ 10288 for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10289 adp = TAILQ_NEXT(adp, ad_next)) { 10290#ifdef INVARIANTS 10291 if (deplist != 0 && prevlbn >= adp->ad_offset) 10292 panic("softdep_write_inodeblock: lbn order"); 10293 if ((adp->ad_state & ATTACHED) == 0) 10294 panic("inodedep %p and adp %p not attached", inodedep, adp); 10295 prevlbn = adp->ad_offset; 10296 if (adp->ad_offset < NDADDR && 10297 dp->di_db[adp->ad_offset] != adp->ad_newblkno) 10298 panic("%s: direct pointer #%jd mismatch %jd != %jd", 10299 "softdep_write_inodeblock", 10300 (intmax_t)adp->ad_offset, 10301 (intmax_t)dp->di_db[adp->ad_offset], 10302 (intmax_t)adp->ad_newblkno); 10303 if (adp->ad_offset >= NDADDR && 10304 dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno) 10305 panic("%s indirect pointer #%jd mismatch %jd != %jd", 10306 "softdep_write_inodeblock:", 10307 (intmax_t)adp->ad_offset - NDADDR, 10308 (intmax_t)dp->di_ib[adp->ad_offset - NDADDR], 10309 (intmax_t)adp->ad_newblkno); 10310 deplist |= 1 << adp->ad_offset; 10311 if ((adp->ad_state & ATTACHED) == 0) 10312 panic("softdep_write_inodeblock: Unknown state 0x%x", 10313 adp->ad_state); 10314#endif /* INVARIANTS */ 10315 adp->ad_state &= ~ATTACHED; 10316 adp->ad_state |= UNDONE; 10317 } 10318 /* 10319 * The on-disk inode cannot claim to be any larger than the last 10320 * fragment that has been written. Otherwise, the on-disk inode 10321 * might have fragments that were not the last block in the file 10322 * which would corrupt the filesystem. 10323 */ 10324 for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; 10325 lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) { 10326 if (adp->ad_offset >= NDADDR) 10327 break; 10328 dp->di_db[adp->ad_offset] = adp->ad_oldblkno; 10329 /* keep going until hitting a rollback to a frag */ 10330 if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize) 10331 continue; 10332 dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize; 10333 for (i = adp->ad_offset + 1; i < NDADDR; i++) { 10334#ifdef INVARIANTS 10335 if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0) 10336 panic("softdep_write_inodeblock: lost dep2"); 10337#endif /* INVARIANTS */ 10338 dp->di_db[i] = 0; 10339 } 10340 for (i = 0; i < NIADDR; i++) { 10341#ifdef INVARIANTS 10342 if (dp->di_ib[i] != 0 && 10343 (deplist & ((1 << NDADDR) << i)) == 0) 10344 panic("softdep_write_inodeblock: lost dep3"); 10345#endif /* INVARIANTS */ 10346 dp->di_ib[i] = 0; 10347 } 10348 return; 10349 } 10350 /* 10351 * If we have zero'ed out the last allocated block of the file, 10352 * roll back the size to the last currently allocated block. 10353 * We know that this last allocated block is a full-sized as 10354 * we already checked for fragments in the loop above. 10355 */ 10356 if (lastadp != NULL && 10357 dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) { 10358 for (i = lastadp->ad_offset; i >= 0; i--) 10359 if (dp->di_db[i] != 0) 10360 break; 10361 dp->di_size = (i + 1) * fs->fs_bsize; 10362 } 10363 /* 10364 * The only dependencies are for indirect blocks. 10365 * 10366 * The file size for indirect block additions is not guaranteed. 10367 * Such a guarantee would be non-trivial to achieve. The conventional 10368 * synchronous write implementation also does not make this guarantee. 10369 * Fsck should catch and fix discrepancies. Arguably, the file size 10370 * can be over-estimated without destroying integrity when the file 10371 * moves into the indirect blocks (i.e., is large). 
If we want to 10372 * postpone fsck, we are stuck with this argument. 10373 */ 10374 for (; adp; adp = TAILQ_NEXT(adp, ad_next)) 10375 dp->di_ib[adp->ad_offset - NDADDR] = 0; 10376} 10377 10378/* 10379 * Cancel an indirdep as a result of truncation. Release all of the 10380 * children allocindirs and place their journal work on the appropriate 10381 * list. 10382 */ 10383static void 10384cancel_indirdep(indirdep, bp, freeblks) 10385 struct indirdep *indirdep; 10386 struct buf *bp; 10387 struct freeblks *freeblks; 10388{ 10389 struct allocindir *aip; 10390 10391 /* 10392 * None of the indirect pointers will ever be visible, 10393 * so they can simply be tossed. GOINGAWAY ensures 10394 * that allocated pointers will be saved in the buffer 10395 * cache until they are freed. Note that they will 10396 * only be able to be found by their physical address 10397 * since the inode mapping the logical address will 10398 * be gone. The save buffer used for the safe copy 10399 * was allocated in setup_allocindir_phase2 using 10400 * the physical address so it could be used for this 10401 * purpose. Hence we swap the safe copy with the real 10402 * copy, allowing the safe copy to be freed and holding 10403 * on to the real copy for later use in indir_trunc. 10404 */ 10405 if (indirdep->ir_state & GOINGAWAY) 10406 panic("cancel_indirdep: already gone"); 10407 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 10408 indirdep->ir_state |= DEPCOMPLETE; 10409 LIST_REMOVE(indirdep, ir_next); 10410 } 10411 indirdep->ir_state |= GOINGAWAY; 10412 VFSTOUFS(indirdep->ir_list.wk_mp)->softdep_numindirdeps += 1; 10413 /* 10414 * Pass in bp for blocks still have journal writes 10415 * pending so we can cancel them on their own. 10416 */ 10417 while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0) 10418 cancel_allocindir(aip, bp, freeblks, 0); 10419 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) 10420 cancel_allocindir(aip, NULL, freeblks, 0); 10421 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) 10422 cancel_allocindir(aip, NULL, freeblks, 0); 10423 while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != 0) 10424 cancel_allocindir(aip, NULL, freeblks, 0); 10425 /* 10426 * If there are pending partial truncations we need to keep the 10427 * old block copy around until they complete. This is because 10428 * the current b_data is not a perfect superset of the available 10429 * blocks. 10430 */ 10431 if (TAILQ_EMPTY(&indirdep->ir_trunc)) 10432 bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount); 10433 else 10434 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10435 WORKLIST_REMOVE(&indirdep->ir_list); 10436 WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list); 10437 indirdep->ir_bp = NULL; 10438 indirdep->ir_freeblks = freeblks; 10439} 10440 10441/* 10442 * Free an indirdep once it no longer has new pointers to track. 
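 *
 * ("No longer has new pointers" is an informal restatement of the
 * assertions below: the trunc, complete, write, done and dep lists must
 * all be empty, the indirdep must be DEPCOMPLETE, and any saved data
 * must already have been released.)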
10443 */ 10444static void 10445free_indirdep(indirdep) 10446 struct indirdep *indirdep; 10447{ 10448 10449 KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc), 10450 ("free_indirdep: Indir trunc list not empty.")); 10451 KASSERT(LIST_EMPTY(&indirdep->ir_completehd), 10452 ("free_indirdep: Complete head not empty.")); 10453 KASSERT(LIST_EMPTY(&indirdep->ir_writehd), 10454 ("free_indirdep: write head not empty.")); 10455 KASSERT(LIST_EMPTY(&indirdep->ir_donehd), 10456 ("free_indirdep: done head not empty.")); 10457 KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd), 10458 ("free_indirdep: deplist head not empty.")); 10459 KASSERT((indirdep->ir_state & DEPCOMPLETE), 10460 ("free_indirdep: %p still on newblk list.", indirdep)); 10461 KASSERT(indirdep->ir_saveddata == NULL, 10462 ("free_indirdep: %p still has saved data.", indirdep)); 10463 if (indirdep->ir_state & ONWORKLIST) 10464 WORKLIST_REMOVE(&indirdep->ir_list); 10465 WORKITEM_FREE(indirdep, D_INDIRDEP); 10466} 10467 10468/* 10469 * Called before a write to an indirdep. This routine is responsible for 10470 * rolling back pointers to a safe state which includes only those 10471 * allocindirs which have been completed. 10472 */ 10473static void 10474initiate_write_indirdep(indirdep, bp) 10475 struct indirdep *indirdep; 10476 struct buf *bp; 10477{ 10478 struct ufsmount *ump; 10479 10480 indirdep->ir_state |= IOSTARTED; 10481 if (indirdep->ir_state & GOINGAWAY) 10482 panic("disk_io_initiation: indirdep gone"); 10483 /* 10484 * If there are no remaining dependencies, this will be writing 10485 * the real pointers. 10486 */ 10487 if (LIST_EMPTY(&indirdep->ir_deplisthd) && 10488 TAILQ_EMPTY(&indirdep->ir_trunc)) 10489 return; 10490 /* 10491 * Replace up-to-date version with safe version. 10492 */ 10493 if (indirdep->ir_saveddata == NULL) { 10494 ump = VFSTOUFS(indirdep->ir_list.wk_mp); 10495 LOCK_OWNED(ump); 10496 FREE_LOCK(ump); 10497 indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP, 10498 M_SOFTDEP_FLAGS); 10499 ACQUIRE_LOCK(ump); 10500 } 10501 indirdep->ir_state &= ~ATTACHED; 10502 indirdep->ir_state |= UNDONE; 10503 bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount); 10504 bcopy(indirdep->ir_savebp->b_data, bp->b_data, 10505 bp->b_bcount); 10506} 10507 10508/* 10509 * Called when an inode has been cleared in a cg bitmap. This finally 10510 * eliminates any canceled jaddrefs 10511 */ 10512void 10513softdep_setup_inofree(mp, bp, ino, wkhd) 10514 struct mount *mp; 10515 struct buf *bp; 10516 ino_t ino; 10517 struct workhead *wkhd; 10518{ 10519 struct worklist *wk, *wkn; 10520 struct inodedep *inodedep; 10521 struct ufsmount *ump; 10522 uint8_t *inosused; 10523 struct cg *cgp; 10524 struct fs *fs; 10525 10526 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 10527 ("softdep_setup_inofree called on non-softdep filesystem")); 10528 ump = VFSTOUFS(mp); 10529 ACQUIRE_LOCK(ump); 10530 fs = ump->um_fs; 10531 cgp = (struct cg *)bp->b_data; 10532 inosused = cg_inosused(cgp); 10533 if (isset(inosused, ino % fs->fs_ipg)) 10534 panic("softdep_setup_inofree: inode %ju not freed.", 10535 (uintmax_t)ino); 10536 if (inodedep_lookup(mp, ino, 0, &inodedep)) 10537 panic("softdep_setup_inofree: ino %ju has existing inodedep %p", 10538 (uintmax_t)ino, inodedep); 10539 if (wkhd) { 10540 LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) { 10541 if (wk->wk_type != D_JADDREF) 10542 continue; 10543 WORKLIST_REMOVE(wk); 10544 /* 10545 * We can free immediately even if the jaddref 10546 * isn't attached in a background write as now 10547 * the bitmaps are reconciled. 
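 *
 * (Summary, hedged: each such jaddref is forced to COMPLETE | ATTACHED
 * and freed on the spot; whatever journal work remains is then migrated
 * onto the cg buffer's dependency list by jwork_move() below.)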
10548 */ 10549 wk->wk_state |= COMPLETE | ATTACHED; 10550 free_jaddref(WK_JADDREF(wk)); 10551 } 10552 jwork_move(&bp->b_dep, wkhd); 10553 } 10554 FREE_LOCK(ump); 10555} 10556 10557 10558/* 10559 * Called via ffs_blkfree() after a set of frags has been cleared from a cg 10560 * map. Any dependencies waiting for the write to clear are added to the 10561 * buf's list and any jnewblks that are being canceled are discarded 10562 * immediately. 10563 */ 10564void 10565softdep_setup_blkfree(mp, bp, blkno, frags, wkhd) 10566 struct mount *mp; 10567 struct buf *bp; 10568 ufs2_daddr_t blkno; 10569 int frags; 10570 struct workhead *wkhd; 10571{ 10572 struct bmsafemap *bmsafemap; 10573 struct jnewblk *jnewblk; 10574 struct ufsmount *ump; 10575 struct worklist *wk; 10576 struct fs *fs; 10577#ifdef SUJ_DEBUG 10578 uint8_t *blksfree; 10579 struct cg *cgp; 10580 ufs2_daddr_t jstart; 10581 ufs2_daddr_t jend; 10582 ufs2_daddr_t end; 10583 long bno; 10584 int i; 10585#endif 10586 10587 CTR3(KTR_SUJ, 10588 "softdep_setup_blkfree: blkno %jd frags %d wk head %p", 10589 blkno, frags, wkhd); 10590 10591 ump = VFSTOUFS(mp); 10592 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0, 10593 ("softdep_setup_blkfree called on non-softdep filesystem")); 10594 ACQUIRE_LOCK(ump); 10595 /* Lookup the bmsafemap so we track when it is dirty. */ 10596 fs = ump->um_fs; 10597 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10598 /* 10599 * Detach any jnewblks which have been canceled. They must linger 10600 * until the bitmap is cleared again by ffs_blkfree() to prevent 10601 * an unjournaled allocation from hitting the disk. 10602 */ 10603 if (wkhd) { 10604 while ((wk = LIST_FIRST(wkhd)) != NULL) { 10605 CTR2(KTR_SUJ, 10606 "softdep_setup_blkfree: blkno %jd wk type %d", 10607 blkno, wk->wk_type); 10608 WORKLIST_REMOVE(wk); 10609 if (wk->wk_type != D_JNEWBLK) { 10610 WORKLIST_INSERT(&bmsafemap->sm_freehd, wk); 10611 continue; 10612 } 10613 jnewblk = WK_JNEWBLK(wk); 10614 KASSERT(jnewblk->jn_state & GOINGAWAY, 10615 ("softdep_setup_blkfree: jnewblk not canceled.")); 10616#ifdef SUJ_DEBUG 10617 /* 10618 * Assert that this block is free in the bitmap 10619 * before we discard the jnewblk. 10620 */ 10621 cgp = (struct cg *)bp->b_data; 10622 blksfree = cg_blksfree(cgp); 10623 bno = dtogd(fs, jnewblk->jn_blkno); 10624 for (i = jnewblk->jn_oldfrags; 10625 i < jnewblk->jn_frags; i++) { 10626 if (isset(blksfree, bno + i)) 10627 continue; 10628 panic("softdep_setup_blkfree: not free"); 10629 } 10630#endif 10631 /* 10632 * Even if it's not attached we can free immediately 10633 * as the new bitmap is correct. 10634 */ 10635 wk->wk_state |= COMPLETE | ATTACHED; 10636 free_jnewblk(jnewblk); 10637 } 10638 } 10639 10640#ifdef SUJ_DEBUG 10641 /* 10642 * Assert that we are not freeing a block which has an outstanding 10643 * allocation dependency. 10644 */ 10645 fs = VFSTOUFS(mp)->um_fs; 10646 bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL); 10647 end = blkno + frags; 10648 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10649 /* 10650 * Don't match against blocks that will be freed when the 10651 * background write is done. 
10652 */ 10653 if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) == 10654 (COMPLETE | DEPCOMPLETE)) 10655 continue; 10656 jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags; 10657 jend = jnewblk->jn_blkno + jnewblk->jn_frags; 10658 if ((blkno >= jstart && blkno < jend) || 10659 (end > jstart && end <= jend)) { 10660 printf("state 0x%X %jd - %d %d dep %p\n", 10661 jnewblk->jn_state, jnewblk->jn_blkno, 10662 jnewblk->jn_oldfrags, jnewblk->jn_frags, 10663 jnewblk->jn_dep); 10664 panic("softdep_setup_blkfree: " 10665 "%jd-%jd(%d) overlaps with %jd-%jd", 10666 blkno, end, frags, jstart, jend); 10667 } 10668 } 10669#endif 10670 FREE_LOCK(ump); 10671} 10672 10673/* 10674 * Revert a block allocation when the journal record that describes it 10675 * is not yet written. 10676 */ 10677static int 10678jnewblk_rollback(jnewblk, fs, cgp, blksfree) 10679 struct jnewblk *jnewblk; 10680 struct fs *fs; 10681 struct cg *cgp; 10682 uint8_t *blksfree; 10683{ 10684 ufs1_daddr_t fragno; 10685 long cgbno, bbase; 10686 int frags, blk; 10687 int i; 10688 10689 frags = 0; 10690 cgbno = dtogd(fs, jnewblk->jn_blkno); 10691 /* 10692 * We have to test which frags need to be rolled back. We may 10693 * be operating on a stale copy when doing background writes. 10694 */ 10695 for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) 10696 if (isclr(blksfree, cgbno + i)) 10697 frags++; 10698 if (frags == 0) 10699 return (0); 10700 /* 10701 * This is mostly ffs_blkfree() sans some validation and 10702 * superblock updates. 10703 */ 10704 if (frags == fs->fs_frag) { 10705 fragno = fragstoblks(fs, cgbno); 10706 ffs_setblock(fs, blksfree, fragno); 10707 ffs_clusteracct(fs, cgp, fragno, 1); 10708 cgp->cg_cs.cs_nbfree++; 10709 } else { 10710 cgbno += jnewblk->jn_oldfrags; 10711 bbase = cgbno - fragnum(fs, cgbno); 10712 /* Decrement the old frags. */ 10713 blk = blkmap(fs, blksfree, bbase); 10714 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 10715 /* Deallocate the fragment */ 10716 for (i = 0; i < frags; i++) 10717 setbit(blksfree, cgbno + i); 10718 cgp->cg_cs.cs_nffree += frags; 10719 /* Add back in counts associated with the new frags */ 10720 blk = blkmap(fs, blksfree, bbase); 10721 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 10722 /* If a complete block has been reassembled, account for it. */ 10723 fragno = fragstoblks(fs, bbase); 10724 if (ffs_isblock(fs, blksfree, fragno)) { 10725 cgp->cg_cs.cs_nffree -= fs->fs_frag; 10726 ffs_clusteracct(fs, cgp, fragno, 1); 10727 cgp->cg_cs.cs_nbfree++; 10728 } 10729 } 10730 stat_jnewblk++; 10731 jnewblk->jn_state &= ~ATTACHED; 10732 jnewblk->jn_state |= UNDONE; 10733 10734 return (frags); 10735} 10736 10737static void 10738initiate_write_bmsafemap(bmsafemap, bp) 10739 struct bmsafemap *bmsafemap; 10740 struct buf *bp; /* The cg block. */ 10741{ 10742 struct jaddref *jaddref; 10743 struct jnewblk *jnewblk; 10744 uint8_t *inosused; 10745 uint8_t *blksfree; 10746 struct cg *cgp; 10747 struct fs *fs; 10748 ino_t ino; 10749 10750 if (bmsafemap->sm_state & IOSTARTED) 10751 return; 10752 bmsafemap->sm_state |= IOSTARTED; 10753 /* 10754 * Clear any inode allocations which are pending journal writes. 
10755 */ 10756 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) { 10757 cgp = (struct cg *)bp->b_data; 10758 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10759 inosused = cg_inosused(cgp); 10760 LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) { 10761 ino = jaddref->ja_ino % fs->fs_ipg; 10762 if (isset(inosused, ino)) { 10763 if ((jaddref->ja_mode & IFMT) == IFDIR) 10764 cgp->cg_cs.cs_ndir--; 10765 cgp->cg_cs.cs_nifree++; 10766 clrbit(inosused, ino); 10767 jaddref->ja_state &= ~ATTACHED; 10768 jaddref->ja_state |= UNDONE; 10769 stat_jaddref++; 10770 } else 10771 panic("initiate_write_bmsafemap: inode %ju " 10772 "marked free", (uintmax_t)jaddref->ja_ino); 10773 } 10774 } 10775 /* 10776 * Clear any block allocations which are pending journal writes. 10777 */ 10778 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 10779 cgp = (struct cg *)bp->b_data; 10780 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 10781 blksfree = cg_blksfree(cgp); 10782 LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) { 10783 if (jnewblk_rollback(jnewblk, fs, cgp, blksfree)) 10784 continue; 10785 panic("initiate_write_bmsafemap: block %jd " 10786 "marked free", jnewblk->jn_blkno); 10787 } 10788 } 10789 /* 10790 * Move allocation lists to the written lists so they can be 10791 * cleared once the block write is complete. 10792 */ 10793 LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr, 10794 inodedep, id_deps); 10795 LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr, 10796 newblk, nb_deps); 10797 LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist, 10798 wk_list); 10799} 10800 10801/* 10802 * This routine is called during the completion interrupt 10803 * service routine for a disk write (from the procedure called 10804 * by the device driver to inform the filesystem caches of 10805 * a request completion). It should be called early in this 10806 * procedure, before the block is made available to other 10807 * processes or other routines are called. 10808 * 10809 */ 10810static void 10811softdep_disk_write_complete(bp) 10812 struct buf *bp; /* describes the completed disk write */ 10813{ 10814 struct worklist *wk; 10815 struct worklist *owk; 10816 struct ufsmount *ump; 10817 struct workhead reattach; 10818 struct freeblks *freeblks; 10819 struct buf *sbp; 10820 10821 /* 10822 * If an error occurred while doing the write, then the data 10823 * has not hit the disk and the dependencies cannot be unrolled. 10824 */ 10825 if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) 10826 return; 10827 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 10828 return; 10829 ump = VFSTOUFS(wk->wk_mp); 10830 LIST_INIT(&reattach); 10831 /* 10832 * This lock must not be released anywhere in this code segment. 
	 */
	sbp = NULL;
	owk = NULL;
	ACQUIRE_LOCK(ump);
	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
		WORKLIST_REMOVE(wk);
		dep_write[wk->wk_type]++;
		if (wk == owk)
			panic("duplicate worklist: %p\n", wk);
		owk = wk;
		switch (wk->wk_type) {

		case D_PAGEDEP:
			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_INODEDEP:
			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_BMSAFEMAP:
			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_MKDIR:
			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
			continue;

		case D_ALLOCDIRECT:
			wk->wk_state |= COMPLETE;
			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
			continue;

		case D_ALLOCINDIR:
			wk->wk_state |= COMPLETE;
			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
			continue;

		case D_INDIRDEP:
			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEBLKS:
			wk->wk_state |= COMPLETE;
			freeblks = WK_FREEBLKS(wk);
			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
			    LIST_EMPTY(&freeblks->fb_jblkdephd))
				add_to_worklist(wk, WK_NODELAY);
			continue;

		case D_FREEWORK:
			handle_written_freework(WK_FREEWORK(wk));
			break;

		case D_JSEGDEP:
			free_jsegdep(WK_JSEGDEP(wk));
			continue;

		case D_JSEG:
			handle_written_jseg(WK_JSEG(wk), bp);
			continue;

		case D_SBDEP:
			if (handle_written_sbdep(WK_SBDEP(wk), bp))
				WORKLIST_INSERT(&reattach, wk);
			continue;

		case D_FREEDEP:
			free_freedep(WK_FREEDEP(wk));
			continue;

		default:
			panic("handle_disk_write_complete: Unknown type %s",
			    TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
	}
	/*
	 * Reattach any requests that must be redone.
	 */
	while ((wk = LIST_FIRST(&reattach)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&bp->b_dep, wk);
	}
	FREE_LOCK(ump);
	if (sbp)
		brelse(sbp);
}

/*
 * Called from within softdep_disk_write_complete above. Note that
 * this routine is always called from interrupt level with further
 * splbio interrupts blocked.
 */
static void
handle_allocdirect_partdone(adp, wkhd)
	struct allocdirect *adp;	/* the completed allocdirect */
	struct workhead *wkhd;		/* Work to do when inode is written. */
{
	struct allocdirectlst *listhead;
	struct allocdirect *listadp;
	struct inodedep *inodedep;
	long bsize;

	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	/*
	 * The on-disk inode cannot claim to be any larger than the last
	 * fragment that has been written. Otherwise, the on-disk inode
	 * might have fragments that were not the last block in the file
	 * which would corrupt the filesystem. Thus, we cannot free any
	 * allocdirects after one whose ad_oldblkno claims a fragment as
	 * these blocks must be rolled back to zero before writing the inode.
	 * We check the currently active set of allocdirects in id_inoupdt
	 * or id_extupdt as appropriate.
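	 *
	 * A worked example with hypothetical sizes (fs_bsize of 16384):
	 * if the list holds allocdirects A (ad_oldsize 16384),
	 * B (ad_oldsize 4096) and C, and C is the just-completed
	 * dependency, the scan below stops at B and returns:
	 *
	 *	A: ad_oldsize == bsize, a full block, keep scanning
	 *	B: ad_oldsize is a fragment, so C may not be freed yet
	 *
	 * because B's old fragment must be rolled back to zero in the
	 * on-disk inode before the inode block may be written.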
	 */
	inodedep = adp->ad_inodedep;
	bsize = inodedep->id_fs->fs_bsize;
	if (adp->ad_state & EXTDATA)
		listhead = &inodedep->id_extupdt;
	else
		listhead = &inodedep->id_inoupdt;
	TAILQ_FOREACH(listadp, listhead, ad_next) {
		/* found our block */
		if (listadp == adp)
			break;
		/* continue if the old block is not a fragment */
		if (listadp->ad_oldsize == 0 ||
		    listadp->ad_oldsize == bsize)
			continue;
		/* hit a fragment */
		return;
	}
	/*
	 * If we have reached the end of the current list without
	 * finding the just finished dependency, then it must be
	 * on the future dependency list. Future dependencies cannot
	 * be freed until they are moved to the current list.
	 */
	if (listadp == NULL) {
#ifdef DEBUG
		if (adp->ad_state & EXTDATA)
			listhead = &inodedep->id_newextupdt;
		else
			listhead = &inodedep->id_newinoupdt;
		TAILQ_FOREACH(listadp, listhead, ad_next)
			/* found our block */
			if (listadp == adp)
				break;
		if (listadp == NULL)
			panic("handle_allocdirect_partdone: lost dep");
#endif /* DEBUG */
		return;
	}
	/*
	 * If we have found the just finished dependency, then queue
	 * it along with anything that follows it that is complete.
	 * Since the pointer has not yet been written in the inode
	 * as the dependency prevents it, place the allocdirect on the
	 * bufwait list where it will be freed once the pointer is
	 * valid.
	 */
	if (wkhd == NULL)
		wkhd = &inodedep->id_bufwait;
	for (; adp; adp = listadp) {
		listadp = TAILQ_NEXT(adp, ad_next);
		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
			return;
		TAILQ_REMOVE(listhead, adp, ad_next);
		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
	}
}

/*
 * Called from within softdep_disk_write_complete above. This routine
 * completes successfully written allocindirs.
 */
static void
handle_allocindir_partdone(aip)
	struct allocindir *aip;		/* the completed allocindir */
{
	struct indirdep *indirdep;

	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
		return;
	indirdep = aip->ai_indirdep;
	LIST_REMOVE(aip, ai_next);
	/*
	 * Don't set a pointer while the buffer is undergoing IO or while
	 * we have active truncations.
	 */
	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
		return;
	}
	if (indirdep->ir_state & UFS1FMT)
		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
		    aip->ai_newblkno;
	else
		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
		    aip->ai_newblkno;
	/*
	 * Await the pointer write before freeing the allocindir.
	 */
	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
}

/*
 * Release segments held on a jwork list.
11046 */ 11047static void 11048handle_jwork(wkhd) 11049 struct workhead *wkhd; 11050{ 11051 struct worklist *wk; 11052 11053 while ((wk = LIST_FIRST(wkhd)) != NULL) { 11054 WORKLIST_REMOVE(wk); 11055 switch (wk->wk_type) { 11056 case D_JSEGDEP: 11057 free_jsegdep(WK_JSEGDEP(wk)); 11058 continue; 11059 case D_FREEDEP: 11060 free_freedep(WK_FREEDEP(wk)); 11061 continue; 11062 case D_FREEFRAG: 11063 rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep)); 11064 WORKITEM_FREE(wk, D_FREEFRAG); 11065 continue; 11066 case D_FREEWORK: 11067 handle_written_freework(WK_FREEWORK(wk)); 11068 continue; 11069 default: 11070 panic("handle_jwork: Unknown type %s\n", 11071 TYPENAME(wk->wk_type)); 11072 } 11073 } 11074} 11075 11076/* 11077 * Handle the bufwait list on an inode when it is safe to release items 11078 * held there. This normally happens after an inode block is written but 11079 * may be delayed and handled later if there are pending journal items that 11080 * are not yet safe to be released. 11081 */ 11082static struct freefile * 11083handle_bufwait(inodedep, refhd) 11084 struct inodedep *inodedep; 11085 struct workhead *refhd; 11086{ 11087 struct jaddref *jaddref; 11088 struct freefile *freefile; 11089 struct worklist *wk; 11090 11091 freefile = NULL; 11092 while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) { 11093 WORKLIST_REMOVE(wk); 11094 switch (wk->wk_type) { 11095 case D_FREEFILE: 11096 /* 11097 * We defer adding freefile to the worklist 11098 * until all other additions have been made to 11099 * ensure that it will be done after all the 11100 * old blocks have been freed. 11101 */ 11102 if (freefile != NULL) 11103 panic("handle_bufwait: freefile"); 11104 freefile = WK_FREEFILE(wk); 11105 continue; 11106 11107 case D_MKDIR: 11108 handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT); 11109 continue; 11110 11111 case D_DIRADD: 11112 diradd_inode_written(WK_DIRADD(wk), inodedep); 11113 continue; 11114 11115 case D_FREEFRAG: 11116 wk->wk_state |= COMPLETE; 11117 if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE) 11118 add_to_worklist(wk, 0); 11119 continue; 11120 11121 case D_DIRREM: 11122 wk->wk_state |= COMPLETE; 11123 add_to_worklist(wk, 0); 11124 continue; 11125 11126 case D_ALLOCDIRECT: 11127 case D_ALLOCINDIR: 11128 free_newblk(WK_NEWBLK(wk)); 11129 continue; 11130 11131 case D_JNEWBLK: 11132 wk->wk_state |= COMPLETE; 11133 free_jnewblk(WK_JNEWBLK(wk)); 11134 continue; 11135 11136 /* 11137 * Save freed journal segments and add references on 11138 * the supplied list which will delay their release 11139 * until the cg bitmap is cleared on disk. 11140 */ 11141 case D_JSEGDEP: 11142 if (refhd == NULL) 11143 free_jsegdep(WK_JSEGDEP(wk)); 11144 else 11145 WORKLIST_INSERT(refhd, wk); 11146 continue; 11147 11148 case D_JADDREF: 11149 jaddref = WK_JADDREF(wk); 11150 TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, 11151 if_deps); 11152 /* 11153 * Transfer any jaddrefs to the list to be freed with 11154 * the bitmap if we're handling a removed file. 11155 */ 11156 if (refhd == NULL) { 11157 wk->wk_state |= COMPLETE; 11158 free_jaddref(jaddref); 11159 } else 11160 WORKLIST_INSERT(refhd, wk); 11161 continue; 11162 11163 default: 11164 panic("handle_bufwait: Unknown type %p(%s)", 11165 wk, TYPENAME(wk->wk_type)); 11166 /* NOTREACHED */ 11167 } 11168 } 11169 return (freefile); 11170} 11171/* 11172 * Called from within softdep_disk_write_complete above to restore 11173 * in-memory inode block contents to their most up-to-date state. 
Note 11174 * that this routine is always called from interrupt level with further 11175 * splbio interrupts blocked. 11176 */ 11177static int 11178handle_written_inodeblock(inodedep, bp) 11179 struct inodedep *inodedep; 11180 struct buf *bp; /* buffer containing the inode block */ 11181{ 11182 struct freefile *freefile; 11183 struct allocdirect *adp, *nextadp; 11184 struct ufs1_dinode *dp1 = NULL; 11185 struct ufs2_dinode *dp2 = NULL; 11186 struct workhead wkhd; 11187 int hadchanges, fstype; 11188 ino_t freelink; 11189 11190 LIST_INIT(&wkhd); 11191 hadchanges = 0; 11192 freefile = NULL; 11193 if ((inodedep->id_state & IOSTARTED) == 0) 11194 panic("handle_written_inodeblock: not started"); 11195 inodedep->id_state &= ~IOSTARTED; 11196 if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) { 11197 fstype = UFS1; 11198 dp1 = (struct ufs1_dinode *)bp->b_data + 11199 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11200 freelink = dp1->di_freelink; 11201 } else { 11202 fstype = UFS2; 11203 dp2 = (struct ufs2_dinode *)bp->b_data + 11204 ino_to_fsbo(inodedep->id_fs, inodedep->id_ino); 11205 freelink = dp2->di_freelink; 11206 } 11207 /* 11208 * Leave this inodeblock dirty until it's in the list. 11209 */ 11210 if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) { 11211 struct inodedep *inon; 11212 11213 inon = TAILQ_NEXT(inodedep, id_unlinked); 11214 if ((inon == NULL && freelink == 0) || 11215 (inon && inon->id_ino == freelink)) { 11216 if (inon) 11217 inon->id_state |= UNLINKPREV; 11218 inodedep->id_state |= UNLINKNEXT; 11219 } 11220 hadchanges = 1; 11221 } 11222 /* 11223 * If we had to rollback the inode allocation because of 11224 * bitmaps being incomplete, then simply restore it. 11225 * Keep the block dirty so that it will not be reclaimed until 11226 * all associated dependencies have been cleared and the 11227 * corresponding updates written to disk. 11228 */ 11229 if (inodedep->id_savedino1 != NULL) { 11230 hadchanges = 1; 11231 if (fstype == UFS1) 11232 *dp1 = *inodedep->id_savedino1; 11233 else 11234 *dp2 = *inodedep->id_savedino2; 11235 free(inodedep->id_savedino1, M_SAVEDINO); 11236 inodedep->id_savedino1 = NULL; 11237 if ((bp->b_flags & B_DELWRI) == 0) 11238 stat_inode_bitmap++; 11239 bdirty(bp); 11240 /* 11241 * If the inode is clear here and GOINGAWAY it will never 11242 * be written. Process the bufwait and clear any pending 11243 * work which may include the freefile. 11244 */ 11245 if (inodedep->id_state & GOINGAWAY) 11246 goto bufwait; 11247 return (1); 11248 } 11249 inodedep->id_state |= COMPLETE; 11250 /* 11251 * Roll forward anything that had to be rolled back before 11252 * the inode could be updated. 
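	 *
	 * For example (hypothetical values), an allocdirect for direct
	 * block 3 with ad_oldblkno 0 and ad_newblkno 1234 was rolled
	 * back before the write, so the buffer still holds the old
	 * pointer; the loops below re-apply the new one and re-attach
	 * the dependency:
	 *
	 *	dp2->di_db[3] = 1234;		(0 while the write ran)
	 *	adp->ad_state &= ~UNDONE;
	 *	adp->ad_state |= ATTACHED;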
11253 */ 11254 for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) { 11255 nextadp = TAILQ_NEXT(adp, ad_next); 11256 if (adp->ad_state & ATTACHED) 11257 panic("handle_written_inodeblock: new entry"); 11258 if (fstype == UFS1) { 11259 if (adp->ad_offset < NDADDR) { 11260 if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11261 panic("%s %s #%jd mismatch %d != %jd", 11262 "handle_written_inodeblock:", 11263 "direct pointer", 11264 (intmax_t)adp->ad_offset, 11265 dp1->di_db[adp->ad_offset], 11266 (intmax_t)adp->ad_oldblkno); 11267 dp1->di_db[adp->ad_offset] = adp->ad_newblkno; 11268 } else { 11269 if (dp1->di_ib[adp->ad_offset - NDADDR] != 0) 11270 panic("%s: %s #%jd allocated as %d", 11271 "handle_written_inodeblock", 11272 "indirect pointer", 11273 (intmax_t)adp->ad_offset - NDADDR, 11274 dp1->di_ib[adp->ad_offset - NDADDR]); 11275 dp1->di_ib[adp->ad_offset - NDADDR] = 11276 adp->ad_newblkno; 11277 } 11278 } else { 11279 if (adp->ad_offset < NDADDR) { 11280 if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno) 11281 panic("%s: %s #%jd %s %jd != %jd", 11282 "handle_written_inodeblock", 11283 "direct pointer", 11284 (intmax_t)adp->ad_offset, "mismatch", 11285 (intmax_t)dp2->di_db[adp->ad_offset], 11286 (intmax_t)adp->ad_oldblkno); 11287 dp2->di_db[adp->ad_offset] = adp->ad_newblkno; 11288 } else { 11289 if (dp2->di_ib[adp->ad_offset - NDADDR] != 0) 11290 panic("%s: %s #%jd allocated as %jd", 11291 "handle_written_inodeblock", 11292 "indirect pointer", 11293 (intmax_t)adp->ad_offset - NDADDR, 11294 (intmax_t) 11295 dp2->di_ib[adp->ad_offset - NDADDR]); 11296 dp2->di_ib[adp->ad_offset - NDADDR] = 11297 adp->ad_newblkno; 11298 } 11299 } 11300 adp->ad_state &= ~UNDONE; 11301 adp->ad_state |= ATTACHED; 11302 hadchanges = 1; 11303 } 11304 for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) { 11305 nextadp = TAILQ_NEXT(adp, ad_next); 11306 if (adp->ad_state & ATTACHED) 11307 panic("handle_written_inodeblock: new entry"); 11308 if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno) 11309 panic("%s: direct pointers #%jd %s %jd != %jd", 11310 "handle_written_inodeblock", 11311 (intmax_t)adp->ad_offset, "mismatch", 11312 (intmax_t)dp2->di_extb[adp->ad_offset], 11313 (intmax_t)adp->ad_oldblkno); 11314 dp2->di_extb[adp->ad_offset] = adp->ad_newblkno; 11315 adp->ad_state &= ~UNDONE; 11316 adp->ad_state |= ATTACHED; 11317 hadchanges = 1; 11318 } 11319 if (hadchanges && (bp->b_flags & B_DELWRI) == 0) 11320 stat_direct_blk_ptrs++; 11321 /* 11322 * Reset the file size to its most up-to-date value. 
	 */
	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
		panic("handle_written_inodeblock: bad size");
	if (inodedep->id_savednlink > LINK_MAX)
		panic("handle_written_inodeblock: Invalid link count "
		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
	if (fstype == UFS1) {
		if (dp1->di_nlink != inodedep->id_savednlink) {
			dp1->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp1->di_size != inodedep->id_savedsize) {
			dp1->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
	} else {
		if (dp2->di_nlink != inodedep->id_savednlink) {
			dp2->di_nlink = inodedep->id_savednlink;
			hadchanges = 1;
		}
		if (dp2->di_size != inodedep->id_savedsize) {
			dp2->di_size = inodedep->id_savedsize;
			hadchanges = 1;
		}
		if (dp2->di_extsize != inodedep->id_savedextsize) {
			dp2->di_extsize = inodedep->id_savedextsize;
			hadchanges = 1;
		}
	}
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_savednlink = -1;
	/*
	 * If there were any rollbacks in the inode block, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (hadchanges)
		bdirty(bp);
bufwait:
	/*
	 * Process any allocdirects that completed during the update.
	 */
	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
		handle_allocdirect_partdone(adp, &wkhd);
	/*
	 * Process deallocations that were held pending until the
	 * inode had been written to disk. Freeing of the inode
	 * is delayed until after all blocks have been freed to
	 * avoid creation of new <vfsid, inum, lbn> triples
	 * before the old ones have been deleted. Completely
	 * unlinked inodes are not processed until the unlinked
	 * inode list is written or the last reference is removed.
	 */
	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
		freefile = handle_bufwait(inodedep, NULL);
		if (freefile && !LIST_EMPTY(&wkhd)) {
			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
			freefile = NULL;
		}
	}
	/*
	 * Move rolled forward dependency completions to the bufwait list
	 * now that those that were already written have been processed.
	 */
	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
		panic("handle_written_inodeblock: bufwait but no changes");
	jwork_move(&inodedep->id_bufwait, &wkhd);

	if (freefile != NULL) {
		/*
		 * If the inode is goingaway it was never written. Fake up
		 * the state here so free_inodedep() can succeed.
		 */
		if (inodedep->id_state & GOINGAWAY)
			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
		if (free_inodedep(inodedep) == 0)
			panic("handle_written_inodeblock: live inodedep %p",
			    inodedep);
		add_to_worklist(&freefile->fx_list, 0);
		return (0);
	}

	/*
	 * If no outstanding dependencies, free it.
11410 */ 11411 if (free_inodedep(inodedep) || 11412 (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 && 11413 TAILQ_FIRST(&inodedep->id_inoupdt) == 0 && 11414 TAILQ_FIRST(&inodedep->id_extupdt) == 0 && 11415 LIST_FIRST(&inodedep->id_bufwait) == 0)) 11416 return (0); 11417 return (hadchanges); 11418} 11419 11420static int 11421handle_written_indirdep(indirdep, bp, bpp) 11422 struct indirdep *indirdep; 11423 struct buf *bp; 11424 struct buf **bpp; 11425{ 11426 struct allocindir *aip; 11427 struct buf *sbp; 11428 int chgs; 11429 11430 if (indirdep->ir_state & GOINGAWAY) 11431 panic("handle_written_indirdep: indirdep gone"); 11432 if ((indirdep->ir_state & IOSTARTED) == 0) 11433 panic("handle_written_indirdep: IO not started"); 11434 chgs = 0; 11435 /* 11436 * If there were rollbacks revert them here. 11437 */ 11438 if (indirdep->ir_saveddata) { 11439 bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount); 11440 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11441 free(indirdep->ir_saveddata, M_INDIRDEP); 11442 indirdep->ir_saveddata = NULL; 11443 } 11444 chgs = 1; 11445 } 11446 indirdep->ir_state &= ~(UNDONE | IOSTARTED); 11447 indirdep->ir_state |= ATTACHED; 11448 /* 11449 * Move allocindirs with written pointers to the completehd if 11450 * the indirdep's pointer is not yet written. Otherwise 11451 * free them here. 11452 */ 11453 while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) { 11454 LIST_REMOVE(aip, ai_next); 11455 if ((indirdep->ir_state & DEPCOMPLETE) == 0) { 11456 LIST_INSERT_HEAD(&indirdep->ir_completehd, aip, 11457 ai_next); 11458 newblk_freefrag(&aip->ai_block); 11459 continue; 11460 } 11461 free_newblk(&aip->ai_block); 11462 } 11463 /* 11464 * Move allocindirs that have finished dependency processing from 11465 * the done list to the write list after updating the pointers. 11466 */ 11467 if (TAILQ_EMPTY(&indirdep->ir_trunc)) { 11468 while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) { 11469 handle_allocindir_partdone(aip); 11470 if (aip == LIST_FIRST(&indirdep->ir_donehd)) 11471 panic("disk_write_complete: not gone"); 11472 chgs = 1; 11473 } 11474 } 11475 /* 11476 * Preserve the indirdep if there were any changes or if it is not 11477 * yet valid on disk. 11478 */ 11479 if (chgs) { 11480 stat_indir_blk_ptrs++; 11481 bdirty(bp); 11482 return (1); 11483 } 11484 /* 11485 * If there were no changes we can discard the savedbp and detach 11486 * ourselves from the buf. We are only carrying completed pointers 11487 * in this case. 11488 */ 11489 sbp = indirdep->ir_savebp; 11490 sbp->b_flags |= B_INVAL | B_NOCACHE; 11491 indirdep->ir_savebp = NULL; 11492 indirdep->ir_bp = NULL; 11493 if (*bpp != NULL) 11494 panic("handle_written_indirdep: bp already exists."); 11495 *bpp = sbp; 11496 /* 11497 * The indirdep may not be freed until its parent points at it. 11498 */ 11499 if (indirdep->ir_state & DEPCOMPLETE) 11500 free_indirdep(indirdep); 11501 11502 return (0); 11503} 11504 11505/* 11506 * Process a diradd entry after its dependent inode has been written. 11507 * This routine must be called with splbio interrupts blocked. 11508 */ 11509static void 11510diradd_inode_written(dap, inodedep) 11511 struct diradd *dap; 11512 struct inodedep *inodedep; 11513{ 11514 11515 dap->da_state |= COMPLETE; 11516 complete_diradd(dap); 11517 WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list); 11518} 11519 11520/* 11521 * Returns true if the bmsafemap will have rollbacks when written. Must only 11522 * be called with the soft updates lock and the buf lock on the cg held. 
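 *
 * A sketch of the expected use (simplified; a caller that is moving a
 * cg buffer's dependencies onto its background-write copy might do):
 *
 *	if (wk->wk_type == D_BMSAFEMAP &&
 *	    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
 *		dirty = 1;	(the original buffer must stay dirty)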
 */
static int
bmsafemap_backgroundwrite(bmsafemap, bp)
	struct bmsafemap *bmsafemap;
	struct buf *bp;
{
	int dirty;

	LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
	/*
	 * If we're initiating a background write we need to process the
	 * rollbacks as they exist now, not as they exist when IO starts.
	 * No other consumers will look at the contents of the shadowed
	 * buf so this is safe to do here.
	 */
	if (bp->b_xflags & BX_BKGRDMARKER)
		initiate_write_bmsafemap(bmsafemap, bp);

	return (dirty);
}

/*
 * Re-apply an allocation when a cg write is complete.
 */
static int
jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
	struct jnewblk *jnewblk;
	struct fs *fs;
	struct cg *cgp;
	uint8_t *blksfree;
{
	ufs1_daddr_t fragno;
	ufs2_daddr_t blkno;
	long cgbno, bbase;
	int frags, blk;
	int i;

	frags = 0;
	cgbno = dtogd(fs, jnewblk->jn_blkno);
	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
		if (isclr(blksfree, cgbno + i))
			panic("jnewblk_rollforward: re-allocated fragment");
		frags++;
	}
	if (frags == fs->fs_frag) {
		blkno = fragstoblks(fs, cgbno);
		ffs_clrblock(fs, blksfree, (long)blkno);
		ffs_clusteracct(fs, cgp, blkno, -1);
		cgp->cg_cs.cs_nbfree--;
	} else {
		bbase = cgbno - fragnum(fs, cgbno);
		cgbno += jnewblk->jn_oldfrags;
		/* If a complete block had been reassembled, account for it. */
		fragno = fragstoblks(fs, bbase);
		if (ffs_isblock(fs, blksfree, fragno)) {
			cgp->cg_cs.cs_nffree += fs->fs_frag;
			ffs_clusteracct(fs, cgp, fragno, -1);
			cgp->cg_cs.cs_nbfree--;
		}
		/* Decrement the old frags. */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
		/* Allocate the fragment */
		for (i = 0; i < frags; i++)
			clrbit(blksfree, cgbno + i);
		cgp->cg_cs.cs_nffree -= frags;
		/* Add back in counts associated with the new frags */
		blk = blkmap(fs, blksfree, bbase);
		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
	}
	return (frags);
}

/*
 * Complete a write to a bmsafemap structure. Roll forward any bitmap
 * changes if it's not a background write. Set all written dependencies
 * to DEPCOMPLETE and free the structure if possible.
 */
static int
handle_written_bmsafemap(bmsafemap, bp)
	struct bmsafemap *bmsafemap;
	struct buf *bp;
{
	struct newblk *newblk;
	struct inodedep *inodedep;
	struct jaddref *jaddref, *jatmp;
	struct jnewblk *jnewblk, *jntmp;
	struct ufsmount *ump;
	uint8_t *inosused;
	uint8_t *blksfree;
	struct cg *cgp;
	struct fs *fs;
	ino_t ino;
	int foreground;
	int chgs;

	if ((bmsafemap->sm_state & IOSTARTED) == 0)
		panic("handle_written_bmsafemap: Not started\n");
	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
	chgs = 0;
	bmsafemap->sm_state &= ~IOSTARTED;
	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
	/*
	 * Release journal work that was waiting on the write.
	 */
	handle_jwork(&bmsafemap->sm_freewr);

	/*
	 * Restore unwritten inode allocation pending jaddref writes.
11634 */ 11635 if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) { 11636 cgp = (struct cg *)bp->b_data; 11637 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11638 inosused = cg_inosused(cgp); 11639 LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd, 11640 ja_bmdeps, jatmp) { 11641 if ((jaddref->ja_state & UNDONE) == 0) 11642 continue; 11643 ino = jaddref->ja_ino % fs->fs_ipg; 11644 if (isset(inosused, ino)) 11645 panic("handle_written_bmsafemap: " 11646 "re-allocated inode"); 11647 /* Do the roll-forward only if it's a real copy. */ 11648 if (foreground) { 11649 if ((jaddref->ja_mode & IFMT) == IFDIR) 11650 cgp->cg_cs.cs_ndir++; 11651 cgp->cg_cs.cs_nifree--; 11652 setbit(inosused, ino); 11653 chgs = 1; 11654 } 11655 jaddref->ja_state &= ~UNDONE; 11656 jaddref->ja_state |= ATTACHED; 11657 free_jaddref(jaddref); 11658 } 11659 } 11660 /* 11661 * Restore any block allocations which are pending journal writes. 11662 */ 11663 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) { 11664 cgp = (struct cg *)bp->b_data; 11665 fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs; 11666 blksfree = cg_blksfree(cgp); 11667 LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps, 11668 jntmp) { 11669 if ((jnewblk->jn_state & UNDONE) == 0) 11670 continue; 11671 /* Do the roll-forward only if it's a real copy. */ 11672 if (foreground && 11673 jnewblk_rollforward(jnewblk, fs, cgp, blksfree)) 11674 chgs = 1; 11675 jnewblk->jn_state &= ~(UNDONE | NEWBLOCK); 11676 jnewblk->jn_state |= ATTACHED; 11677 free_jnewblk(jnewblk); 11678 } 11679 } 11680 while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) { 11681 newblk->nb_state |= DEPCOMPLETE; 11682 newblk->nb_state &= ~ONDEPLIST; 11683 newblk->nb_bmsafemap = NULL; 11684 LIST_REMOVE(newblk, nb_deps); 11685 if (newblk->nb_list.wk_type == D_ALLOCDIRECT) 11686 handle_allocdirect_partdone( 11687 WK_ALLOCDIRECT(&newblk->nb_list), NULL); 11688 else if (newblk->nb_list.wk_type == D_ALLOCINDIR) 11689 handle_allocindir_partdone( 11690 WK_ALLOCINDIR(&newblk->nb_list)); 11691 else if (newblk->nb_list.wk_type != D_NEWBLK) 11692 panic("handle_written_bmsafemap: Unexpected type: %s", 11693 TYPENAME(newblk->nb_list.wk_type)); 11694 } 11695 while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) { 11696 inodedep->id_state |= DEPCOMPLETE; 11697 inodedep->id_state &= ~ONDEPLIST; 11698 LIST_REMOVE(inodedep, id_deps); 11699 inodedep->id_bmsafemap = NULL; 11700 } 11701 LIST_REMOVE(bmsafemap, sm_next); 11702 if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) && 11703 LIST_EMPTY(&bmsafemap->sm_jnewblkhd) && 11704 LIST_EMPTY(&bmsafemap->sm_newblkhd) && 11705 LIST_EMPTY(&bmsafemap->sm_inodedephd) && 11706 LIST_EMPTY(&bmsafemap->sm_freehd)) { 11707 LIST_REMOVE(bmsafemap, sm_hash); 11708 WORKITEM_FREE(bmsafemap, D_BMSAFEMAP); 11709 return (0); 11710 } 11711 LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next); 11712 if (foreground) 11713 bdirty(bp); 11714 return (1); 11715} 11716 11717/* 11718 * Try to free a mkdir dependency. 
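 *
 * A new directory carries two mkdir dependencies: MKDIR_BODY, cleared
 * when the block holding "." and ".." reaches the disk, and
 * MKDIR_PARENT, cleared when the parent's inode with its incremented
 * link count is written.  Each completed write arrives here via
 * handle_written_mkdir(); only once both bits are gone can the
 * associated diradd proceed:
 *
 *	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
 *	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
 *		dap->da_state |= DEPCOMPLETE;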
11719 */ 11720static void 11721complete_mkdir(mkdir) 11722 struct mkdir *mkdir; 11723{ 11724 struct diradd *dap; 11725 11726 if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE) 11727 return; 11728 LIST_REMOVE(mkdir, md_mkdirs); 11729 dap = mkdir->md_diradd; 11730 dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)); 11731 if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) { 11732 dap->da_state |= DEPCOMPLETE; 11733 complete_diradd(dap); 11734 } 11735 WORKITEM_FREE(mkdir, D_MKDIR); 11736} 11737 11738/* 11739 * Handle the completion of a mkdir dependency. 11740 */ 11741static void 11742handle_written_mkdir(mkdir, type) 11743 struct mkdir *mkdir; 11744 int type; 11745{ 11746 11747 if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type) 11748 panic("handle_written_mkdir: bad type"); 11749 mkdir->md_state |= COMPLETE; 11750 complete_mkdir(mkdir); 11751} 11752 11753static int 11754free_pagedep(pagedep) 11755 struct pagedep *pagedep; 11756{ 11757 int i; 11758 11759 if (pagedep->pd_state & NEWBLOCK) 11760 return (0); 11761 if (!LIST_EMPTY(&pagedep->pd_dirremhd)) 11762 return (0); 11763 for (i = 0; i < DAHASHSZ; i++) 11764 if (!LIST_EMPTY(&pagedep->pd_diraddhd[i])) 11765 return (0); 11766 if (!LIST_EMPTY(&pagedep->pd_pendinghd)) 11767 return (0); 11768 if (!LIST_EMPTY(&pagedep->pd_jmvrefhd)) 11769 return (0); 11770 if (pagedep->pd_state & ONWORKLIST) 11771 WORKLIST_REMOVE(&pagedep->pd_list); 11772 LIST_REMOVE(pagedep, pd_hash); 11773 WORKITEM_FREE(pagedep, D_PAGEDEP); 11774 11775 return (1); 11776} 11777 11778/* 11779 * Called from within softdep_disk_write_complete above. 11780 * A write operation was just completed. Removed inodes can 11781 * now be freed and associated block pointers may be committed. 11782 * Note that this routine is always called from interrupt level 11783 * with further splbio interrupts blocked. 11784 */ 11785static int 11786handle_written_filepage(pagedep, bp) 11787 struct pagedep *pagedep; 11788 struct buf *bp; /* buffer containing the written page */ 11789{ 11790 struct dirrem *dirrem; 11791 struct diradd *dap, *nextdap; 11792 struct direct *ep; 11793 int i, chgs; 11794 11795 if ((pagedep->pd_state & IOSTARTED) == 0) 11796 panic("handle_written_filepage: not started"); 11797 pagedep->pd_state &= ~IOSTARTED; 11798 /* 11799 * Process any directory removals that have been committed. 11800 */ 11801 while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) { 11802 LIST_REMOVE(dirrem, dm_next); 11803 dirrem->dm_state |= COMPLETE; 11804 dirrem->dm_dirinum = pagedep->pd_ino; 11805 KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd), 11806 ("handle_written_filepage: Journal entries not written.")); 11807 add_to_worklist(&dirrem->dm_list, 0); 11808 } 11809 /* 11810 * Free any directory additions that have been committed. 11811 * If it is a newly allocated block, we have to wait until 11812 * the on-disk directory inode claims the new block. 11813 */ 11814 if ((pagedep->pd_state & NEWBLOCK) == 0) 11815 while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL) 11816 free_diradd(dap, NULL); 11817 /* 11818 * Uncommitted directory entries must be restored. 
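	 *
	 * For example (hypothetical entry), a diradd for a new name
	 * referencing inode 1234 was rolled back before the write by
	 * clearing the on-disk entry's d_ino (or, for an overwrite,
	 * by restoring the previous inode number); the loop below
	 * re-applies the committed value:
	 *
	 *	ep->d_ino = dap->da_newinum;	(restore 1234)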
	 */
	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
		     dap = nextdap) {
			nextdap = LIST_NEXT(dap, da_pdlist);
			if (dap->da_state & ATTACHED)
				panic("handle_written_filepage: attached");
			ep = (struct direct *)
			    ((char *)bp->b_data + dap->da_offset);
			ep->d_ino = dap->da_newinum;
			dap->da_state &= ~UNDONE;
			dap->da_state |= ATTACHED;
			chgs = 1;
			/*
			 * If the inode referenced by the directory has
			 * been written out, then the dependency can be
			 * moved to the pending list.
			 */
			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
				LIST_REMOVE(dap, da_pdlist);
				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
				    da_pdlist);
			}
		}
	}
	/*
	 * If there were any rollbacks in the directory, then it must be
	 * marked dirty so that it will eventually get written back in
	 * its correct form.
	 */
	if (chgs) {
		if ((bp->b_flags & B_DELWRI) == 0)
			stat_dir_entry++;
		bdirty(bp);
		return (1);
	}
	/*
	 * If we are not waiting for a new directory block to be
	 * claimed by its inode, then the pagedep will be freed.
	 * Otherwise it will remain to track any new entries on
	 * the page in case they are fsync'ed.
	 */
	free_pagedep(pagedep);
	return (0);
}

/*
 * Writing back in-core inode structures.
 *
 * The filesystem only accesses an inode's contents when it occupies an
 * "in-core" inode structure. These "in-core" structures are separate from
 * the page frames used to cache inode blocks. Only the latter are
 * transferred to/from the disk. So, when the updated contents of the
 * "in-core" inode structure are copied to the corresponding in-memory inode
 * block, the dependencies are also transferred. The following procedure is
 * called when copying a dirty "in-core" inode to a cached inode block.
 */

/*
 * Called when an inode is loaded from disk. If the effective link count
 * differed from the actual link count when it was last flushed, then we
 * need to ensure that the correct effective link count is put back.
 */
void
softdep_load_inodeblock(ip)
	struct inode *ip;	/* the "in_core" copy of the inode */
{
	struct inodedep *inodedep;

	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_load_inodeblock called on non-softdep filesystem"));
	/*
	 * Check for alternate nlink count.
	 */
	ip->i_effnlink = ip->i_nlink;
	ACQUIRE_LOCK(ip->i_ump);
	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
	    &inodedep) == 0) {
		FREE_LOCK(ip->i_ump);
		return;
	}
	ip->i_effnlink -= inodedep->id_nlinkdelta;
	FREE_LOCK(ip->i_ump);
}

/*
 * This routine is called just before the "in-core" inode
 * information is to be copied to the in-memory inode block.
 * Recall that an inode block contains several inodes. If
 * the force flag is set, then the dependencies will be
 * cleared so that the update can always be made. Note that
 * the buffer is locked when this routine is called, so we
 * will never be in the middle of writing the inode block
 * to disk.
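 *
 * A simplified sketch of the expected call site (an ffs_update()-style
 * caller; error handling and the UFS1 case are omitted):
 *
 *	bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
 *	    (int)fs->fs_bsize, NOCRED, &bp);
 *	if (DOINGSOFTDEP(vp))
 *		softdep_update_inodeblock(ip, bp, waitfor);
 *	*((struct ufs2_dinode *)bp->b_data +
 *	    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
 *	waitfor ? bwrite(bp) : bdwrite(bp);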
11913 */ 11914void 11915softdep_update_inodeblock(ip, bp, waitfor) 11916 struct inode *ip; /* the "in_core" copy of the inode */ 11917 struct buf *bp; /* the buffer containing the inode block */ 11918 int waitfor; /* nonzero => update must be allowed */ 11919{ 11920 struct inodedep *inodedep; 11921 struct inoref *inoref; 11922 struct ufsmount *ump; 11923 struct worklist *wk; 11924 struct mount *mp; 11925 struct buf *ibp; 11926 struct fs *fs; 11927 int error; 11928 11929 ump = ip->i_ump; 11930 mp = UFSTOVFS(ump); 11931 KASSERT(MOUNTEDSOFTDEP(mp) != 0, 11932 ("softdep_update_inodeblock called on non-softdep filesystem")); 11933 fs = ip->i_fs; 11934 /* 11935 * Preserve the freelink that is on disk. clear_unlinked_inodedep() 11936 * does not have access to the in-core ip so must write directly into 11937 * the inode block buffer when setting freelink. 11938 */ 11939 if (fs->fs_magic == FS_UFS1_MAGIC) 11940 DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data + 11941 ino_to_fsbo(fs, ip->i_number))->di_freelink); 11942 else 11943 DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data + 11944 ino_to_fsbo(fs, ip->i_number))->di_freelink); 11945 /* 11946 * If the effective link count is not equal to the actual link 11947 * count, then we must track the difference in an inodedep while 11948 * the inode is (potentially) tossed out of the cache. Otherwise, 11949 * if there is no existing inodedep, then there are no dependencies 11950 * to track. 11951 */ 11952 ACQUIRE_LOCK(ump); 11953again: 11954 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { 11955 FREE_LOCK(ump); 11956 if (ip->i_effnlink != ip->i_nlink) 11957 panic("softdep_update_inodeblock: bad link count"); 11958 return; 11959 } 11960 if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink) 11961 panic("softdep_update_inodeblock: bad delta"); 11962 /* 11963 * If we're flushing all dependencies we must also move any waiting 11964 * for journal writes onto the bufwait list prior to I/O. 11965 */ 11966 if (waitfor) { 11967 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 11968 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 11969 == DEPCOMPLETE) { 11970 jwait(&inoref->if_list, MNT_WAIT); 11971 goto again; 11972 } 11973 } 11974 } 11975 /* 11976 * Changes have been initiated. Anything depending on these 11977 * changes cannot occur until this inode has been written. 11978 */ 11979 inodedep->id_state &= ~COMPLETE; 11980 if ((inodedep->id_state & ONWORKLIST) == 0) 11981 WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list); 11982 /* 11983 * Any new dependencies associated with the incore inode must 11984 * now be moved to the list associated with the buffer holding 11985 * the in-memory copy of the inode. Once merged process any 11986 * allocdirects that are completed by the merger. 11987 */ 11988 merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt); 11989 if (!TAILQ_EMPTY(&inodedep->id_inoupdt)) 11990 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt), 11991 NULL); 11992 merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt); 11993 if (!TAILQ_EMPTY(&inodedep->id_extupdt)) 11994 handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt), 11995 NULL); 11996 /* 11997 * Now that the inode has been pushed into the buffer, the 11998 * operations dependent on the inode being written to disk 11999 * can be moved to the id_bufwait so that they will be 12000 * processed when the buffer I/O completes. 
	 */
	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
	}
	/*
	 * Newly allocated inodes cannot be written until the bitmap
	 * that allocates them has been written (indicated by
	 * DEPCOMPLETE being set in id_state). If we are doing a
	 * forced sync (e.g., an fsync on a file), we force the bitmap
	 * to be written so that the update can be done.
	 */
	if (waitfor == 0) {
		FREE_LOCK(ump);
		return;
	}
retry:
	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
		FREE_LOCK(ump);
		return;
	}
	ibp = inodedep->id_bmsafemap->sm_buf;
	ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
	if (ibp == NULL) {
		/*
		 * If ibp came back as NULL, the dependency could have been
		 * freed while we slept. Look it up again, and check to see
		 * that it has completed.
		 */
		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
			goto retry;
		FREE_LOCK(ump);
		return;
	}
	FREE_LOCK(ump);
	if ((error = bwrite(ibp)) != 0)
		softdep_error("softdep_update_inodeblock: bwrite", error);
}

/*
 * Merge a new inode dependency list (such as id_newinoupdt) into an
 * old inode dependency list (such as id_inoupdt). This routine must be
 * called with splbio interrupts blocked.
 */
static void
merge_inode_lists(newlisthead, oldlisthead)
	struct allocdirectlst *newlisthead;
	struct allocdirectlst *oldlisthead;
{
	struct allocdirect *listadp, *newadp;

	newadp = TAILQ_FIRST(newlisthead);
	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
		if (listadp->ad_offset < newadp->ad_offset) {
			listadp = TAILQ_NEXT(listadp, ad_next);
			continue;
		}
		TAILQ_REMOVE(newlisthead, newadp, ad_next);
		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
		if (listadp->ad_offset == newadp->ad_offset) {
			allocdirect_merge(oldlisthead, newadp,
			    listadp);
			listadp = newadp;
		}
		newadp = TAILQ_FIRST(newlisthead);
	}
	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
		TAILQ_REMOVE(newlisthead, newadp, ad_next);
		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
	}
}

/*
 * If we are doing an fsync, then we must ensure that any directory
 * entries for the inode have been written after the inode gets to disk.
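 *
 * For example, an fsync(2) of a newly created file may return only
 * once both the file's inode and the directory entry naming it are
 * durable; otherwise a crash could leave a name pointing at an
 * uninitialized inode.  The loop below chases each pending diradd,
 * preferring the cheap path and falling back to a full directory
 * sync only when needed:
 *
 *	ffs_update(pvp, 1);			(usually sufficient)
 *	ffs_syncvnode(pvp, MNT_WAIT, 0);	(entry in a new block)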
12076 */ 12077int 12078softdep_fsync(vp) 12079 struct vnode *vp; /* the "in_core" copy of the inode */ 12080{ 12081 struct inodedep *inodedep; 12082 struct pagedep *pagedep; 12083 struct inoref *inoref; 12084 struct ufsmount *ump; 12085 struct worklist *wk; 12086 struct diradd *dap; 12087 struct mount *mp; 12088 struct vnode *pvp; 12089 struct inode *ip; 12090 struct buf *bp; 12091 struct fs *fs; 12092 struct thread *td = curthread; 12093 int error, flushparent, pagedep_new_block; 12094 ino_t parentino; 12095 ufs_lbn_t lbn; 12096 12097 ip = VTOI(vp); 12098 fs = ip->i_fs; 12099 ump = ip->i_ump; 12100 mp = vp->v_mount; 12101 if (MOUNTEDSOFTDEP(mp) == 0) 12102 return (0); 12103 ACQUIRE_LOCK(ump); 12104restart: 12105 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) { 12106 FREE_LOCK(ump); 12107 return (0); 12108 } 12109 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12110 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12111 == DEPCOMPLETE) { 12112 jwait(&inoref->if_list, MNT_WAIT); 12113 goto restart; 12114 } 12115 } 12116 if (!LIST_EMPTY(&inodedep->id_inowait) || 12117 !TAILQ_EMPTY(&inodedep->id_extupdt) || 12118 !TAILQ_EMPTY(&inodedep->id_newextupdt) || 12119 !TAILQ_EMPTY(&inodedep->id_inoupdt) || 12120 !TAILQ_EMPTY(&inodedep->id_newinoupdt)) 12121 panic("softdep_fsync: pending ops %p", inodedep); 12122 for (error = 0, flushparent = 0; ; ) { 12123 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL) 12124 break; 12125 if (wk->wk_type != D_DIRADD) 12126 panic("softdep_fsync: Unexpected type %s", 12127 TYPENAME(wk->wk_type)); 12128 dap = WK_DIRADD(wk); 12129 /* 12130 * Flush our parent if this directory entry has a MKDIR_PARENT 12131 * dependency or is contained in a newly allocated block. 12132 */ 12133 if (dap->da_state & DIRCHG) 12134 pagedep = dap->da_previous->dm_pagedep; 12135 else 12136 pagedep = dap->da_pagedep; 12137 parentino = pagedep->pd_ino; 12138 lbn = pagedep->pd_lbn; 12139 if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE) 12140 panic("softdep_fsync: dirty"); 12141 if ((dap->da_state & MKDIR_PARENT) || 12142 (pagedep->pd_state & NEWBLOCK)) 12143 flushparent = 1; 12144 else 12145 flushparent = 0; 12146 /* 12147 * If we are being fsync'ed as part of vgone'ing this vnode, 12148 * then we will not be able to release and recover the 12149 * vnode below, so we just have to give up on writing its 12150 * directory entry out. It will eventually be written, just 12151 * not now, but then the user was not asking to have it 12152 * written, so we are not breaking any promises. 12153 */ 12154 if (vp->v_iflag & VI_DOOMED) 12155 break; 12156 /* 12157 * We prevent deadlock by always fetching inodes from the 12158 * root, moving down the directory tree. Thus, when fetching 12159 * our parent directory, we first try to get the lock. If 12160 * that fails, we must unlock ourselves before requesting 12161 * the lock on our parent. See the comment in ufs_lookup 12162 * for details on possible races. 
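		 *
		 * Distilled sketch of the ordering dance in the code
		 * below (error handling omitted):
		 *
		 *	if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE,
		 *	    &pvp, FFSV_FORCEINSMQ) != 0) {
		 *		VOP_UNLOCK(vp, 0);	(drop the child first)
		 *		ffs_vgetf(mp, parentino, LK_EXCLUSIVE, &pvp,
		 *		    FFSV_FORCEINSMQ);	(sleep for the parent)
		 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		 *	}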
12163 */ 12164 FREE_LOCK(ump); 12165 if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp, 12166 FFSV_FORCEINSMQ)) { 12167 error = vfs_busy(mp, MBF_NOWAIT); 12168 if (error != 0) { 12169 vfs_ref(mp); 12170 VOP_UNLOCK(vp, 0); 12171 error = vfs_busy(mp, 0); 12172 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12173 vfs_rel(mp); 12174 if (error != 0) 12175 return (ENOENT); 12176 if (vp->v_iflag & VI_DOOMED) { 12177 vfs_unbusy(mp); 12178 return (ENOENT); 12179 } 12180 } 12181 VOP_UNLOCK(vp, 0); 12182 error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, 12183 &pvp, FFSV_FORCEINSMQ); 12184 vfs_unbusy(mp); 12185 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 12186 if (vp->v_iflag & VI_DOOMED) { 12187 if (error == 0) 12188 vput(pvp); 12189 error = ENOENT; 12190 } 12191 if (error != 0) 12192 return (error); 12193 } 12194 /* 12195 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps 12196 * that are contained in direct blocks will be resolved by 12197 * doing a ffs_update. Pagedeps contained in indirect blocks 12198 * may require a complete sync'ing of the directory. So, we 12199 * try the cheap and fast ffs_update first, and if that fails, 12200 * then we do the slower ffs_syncvnode of the directory. 12201 */ 12202 if (flushparent) { 12203 int locked; 12204 12205 if ((error = ffs_update(pvp, 1)) != 0) { 12206 vput(pvp); 12207 return (error); 12208 } 12209 ACQUIRE_LOCK(ump); 12210 locked = 1; 12211 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) { 12212 if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) { 12213 if (wk->wk_type != D_DIRADD) 12214 panic("softdep_fsync: Unexpected type %s", 12215 TYPENAME(wk->wk_type)); 12216 dap = WK_DIRADD(wk); 12217 if (dap->da_state & DIRCHG) 12218 pagedep = dap->da_previous->dm_pagedep; 12219 else 12220 pagedep = dap->da_pagedep; 12221 pagedep_new_block = pagedep->pd_state & NEWBLOCK; 12222 FREE_LOCK(ump); 12223 locked = 0; 12224 if (pagedep_new_block && (error = 12225 ffs_syncvnode(pvp, MNT_WAIT, 0))) { 12226 vput(pvp); 12227 return (error); 12228 } 12229 } 12230 } 12231 if (locked) 12232 FREE_LOCK(ump); 12233 } 12234 /* 12235 * Flush directory page containing the inode's name. 12236 */ 12237 error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred, 12238 &bp); 12239 if (error == 0) 12240 error = bwrite(bp); 12241 else 12242 brelse(bp); 12243 vput(pvp); 12244 if (error != 0) 12245 return (error); 12246 ACQUIRE_LOCK(ump); 12247 if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) 12248 break; 12249 } 12250 FREE_LOCK(ump); 12251 return (0); 12252} 12253 12254/* 12255 * Flush all the dirty bitmaps associated with the block device 12256 * before flushing the rest of the dirty blocks so as to reduce 12257 * the number of dependencies that will have to be rolled back. 12258 * 12259 * XXX Unused? 12260 */ 12261void 12262softdep_fsync_mountdev(vp) 12263 struct vnode *vp; 12264{ 12265 struct buf *bp, *nbp; 12266 struct worklist *wk; 12267 struct bufobj *bo; 12268 12269 if (!vn_isdisk(vp, NULL)) 12270 panic("softdep_fsync_mountdev: vnode not a disk"); 12271 bo = &vp->v_bufobj; 12272restart: 12273 BO_LOCK(bo); 12274 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { 12275 /* 12276 * If it is already scheduled, skip to the next buffer. 12277 */ 12278 if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) 12279 continue; 12280 12281 if ((bp->b_flags & B_DELWRI) == 0) 12282 panic("softdep_fsync_mountdev: not dirty"); 12283 /* 12284 * We are only interested in bitmaps with outstanding 12285 * dependencies. 
		 */
		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
		    wk->wk_type != D_BMSAFEMAP ||
		    (bp->b_vflags & BV_BKGRDINPROG)) {
			BUF_UNLOCK(bp);
			continue;
		}
		BO_UNLOCK(bo);
		bremfree(bp);
		(void) bawrite(bp);
		goto restart;
	}
	drain_output(vp);
	BO_UNLOCK(bo);
}

/*
 * Sync all cylinder groups that were dirty at the time this function is
 * called. Newly dirtied cgs will be inserted before the sentinel. This
 * is used to flush freedep activity that may be holding up writes to an
 * indirect block.
 */
static int
sync_cgs(mp, waitfor)
	struct mount *mp;
	int waitfor;
{
	struct bmsafemap *bmsafemap;
	struct bmsafemap *sentinel;
	struct ufsmount *ump;
	struct buf *bp;
	int error;

	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
	sentinel->sm_cg = -1;
	ump = VFSTOUFS(mp);
	error = 0;
	ACQUIRE_LOCK(ump);
	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
		/* Skip sentinels and cgs with no work to release. */
		if (bmsafemap->sm_cg == -1 ||
		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
		     LIST_EMPTY(&bmsafemap->sm_freewr))) {
			LIST_REMOVE(sentinel, sm_next);
			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
			continue;
		}
		/*
		 * If we don't get the lock and we're waiting, try again;
		 * otherwise move on to the next buf and try to sync it.
		 */
		bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
		if (bp == NULL && waitfor == MNT_WAIT)
			continue;
		LIST_REMOVE(sentinel, sm_next);
		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
		if (bp == NULL)
			continue;
		FREE_LOCK(ump);
		if (waitfor == MNT_NOWAIT)
			bawrite(bp);
		else
			error = bwrite(bp);
		ACQUIRE_LOCK(ump);
		if (error)
			break;
	}
	LIST_REMOVE(sentinel, sm_next);
	FREE_LOCK(ump);
	free(sentinel, M_BMSAFEMAP);
	return (error);
}

/*
 * This routine is called when we are trying to synchronously flush a
 * file. This routine must eliminate any filesystem metadata dependencies
 * so that the syncing routine can succeed.
 */
int
softdep_sync_metadata(struct vnode *vp)
{
	struct inode *ip;
	int error;

	ip = VTOI(vp);
	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
	    ("softdep_sync_metadata called on non-softdep filesystem"));
	/*
	 * Ensure that any direct block dependencies have been cleared,
	 * truncations are started, and inode references are journaled.
	 */
	ACQUIRE_LOCK(ip->i_ump);
	/*
	 * Write all journal records to prevent rollbacks on devvp.
	 */
	if (vp->v_type == VCHR)
		softdep_flushjournal(vp->v_mount);
	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
	/*
	 * Ensure that all truncates are written so we won't find deps on
	 * indirect blocks.
	 */
	process_truncates(vp);
	FREE_LOCK(ip->i_ump);

	return (error);
}

/*
 * This routine is called when we are attempting to sync a buf with
 * dependencies. If waitfor is MNT_NOWAIT it attempts to schedule any
 * other IO it can but returns EBUSY if the buffer is not yet able to
 * be written.
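 *
 * A hypothetical caller sketch showing the EBUSY protocol (not
 * verbatim kernel code):
 *
 *	error = softdep_sync_buf(vp, bp, MNT_NOWAIT);
 *	if (error == EBUSY) {
 *		BUF_UNLOCK(bp);		(revisit this buf on a later pass)
 *		continue;
 *	}
 *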
Dependencies which will not cause rollbacks will always 12401 * return 0. 12402 */ 12403int 12404softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor) 12405{ 12406 struct indirdep *indirdep; 12407 struct pagedep *pagedep; 12408 struct allocindir *aip; 12409 struct newblk *newblk; 12410 struct ufsmount *ump; 12411 struct buf *nbp; 12412 struct worklist *wk; 12413 int i, error; 12414 12415 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 12416 ("softdep_sync_buf called on non-softdep filesystem")); 12417 /* 12418 * For VCHR we just don't want to force flush any dependencies that 12419 * will cause rollbacks. 12420 */ 12421 if (vp->v_type == VCHR) { 12422 if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0)) 12423 return (EBUSY); 12424 return (0); 12425 } 12426 ump = VTOI(vp)->i_ump; 12427 ACQUIRE_LOCK(ump); 12428 /* 12429 * As we hold the buffer locked, none of its dependencies 12430 * will disappear. 12431 */ 12432 error = 0; 12433top: 12434 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 12435 switch (wk->wk_type) { 12436 12437 case D_ALLOCDIRECT: 12438 case D_ALLOCINDIR: 12439 newblk = WK_NEWBLK(wk); 12440 if (newblk->nb_jnewblk != NULL) { 12441 if (waitfor == MNT_NOWAIT) { 12442 error = EBUSY; 12443 goto out_unlock; 12444 } 12445 jwait(&newblk->nb_jnewblk->jn_list, waitfor); 12446 goto top; 12447 } 12448 if (newblk->nb_state & DEPCOMPLETE || 12449 waitfor == MNT_NOWAIT) 12450 continue; 12451 nbp = newblk->nb_bmsafemap->sm_buf; 12452 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12453 if (nbp == NULL) 12454 goto top; 12455 FREE_LOCK(ump); 12456 if ((error = bwrite(nbp)) != 0) 12457 goto out; 12458 ACQUIRE_LOCK(ump); 12459 continue; 12460 12461 case D_INDIRDEP: 12462 indirdep = WK_INDIRDEP(wk); 12463 if (waitfor == MNT_NOWAIT) { 12464 if (!TAILQ_EMPTY(&indirdep->ir_trunc) || 12465 !LIST_EMPTY(&indirdep->ir_deplisthd)) { 12466 error = EBUSY; 12467 goto out_unlock; 12468 } 12469 } 12470 if (!TAILQ_EMPTY(&indirdep->ir_trunc)) 12471 panic("softdep_sync_buf: truncation pending."); 12472 restart: 12473 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 12474 newblk = (struct newblk *)aip; 12475 if (newblk->nb_jnewblk != NULL) { 12476 jwait(&newblk->nb_jnewblk->jn_list, 12477 waitfor); 12478 goto restart; 12479 } 12480 if (newblk->nb_state & DEPCOMPLETE) 12481 continue; 12482 nbp = newblk->nb_bmsafemap->sm_buf; 12483 nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor); 12484 if (nbp == NULL) 12485 goto restart; 12486 FREE_LOCK(ump); 12487 if ((error = bwrite(nbp)) != 0) 12488 goto out; 12489 ACQUIRE_LOCK(ump); 12490 goto restart; 12491 } 12492 continue; 12493 12494 case D_PAGEDEP: 12495 /* 12496 * Only flush directory entries in synchronous passes. 12497 */ 12498 if (waitfor != MNT_WAIT) { 12499 error = EBUSY; 12500 goto out_unlock; 12501 } 12502 /* 12503 * While syncing snapshots, we must allow recursive 12504 * lookups. 12505 */ 12506 BUF_AREC(bp); 12507 /* 12508 * We are trying to sync a directory that may 12509 * have dependencies on both its own metadata 12510 * and/or dependencies on the inodes of any 12511 * recently allocated files. We walk its diradd 12512 * lists pushing out the associated inode. 
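             * (The pd_diraddhd[] table is a small hash of pending
             * diradd lists, DAHASHSZ buckets in all; each non-empty
             * bucket is handed to flush_pagedep_deps() in the loop
             * that follows.)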
             */
            pagedep = WK_PAGEDEP(wk);
            for (i = 0; i < DAHASHSZ; i++) {
                if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
                    continue;
                if ((error = flush_pagedep_deps(vp, wk->wk_mp,
                    &pagedep->pd_diraddhd[i]))) {
                    BUF_NOREC(bp);
                    goto out_unlock;
                }
            }
            BUF_NOREC(bp);
            continue;

        case D_FREEWORK:
        case D_FREEDEP:
        case D_JSEGDEP:
        case D_JNEWBLK:
            continue;

        default:
            panic("softdep_sync_buf: Unknown type %s",
                TYPENAME(wk->wk_type));
            /* NOTREACHED */
        }
    }
out_unlock:
    FREE_LOCK(ump);
out:
    return (error);
}

/*
 * Flush the dependencies associated with an inodedep.
 * Called with splbio blocked.
 */
static int
flush_inodedep_deps(vp, mp, ino)
    struct vnode *vp;
    struct mount *mp;
    ino_t ino;
{
    struct inodedep *inodedep;
    struct inoref *inoref;
    struct ufsmount *ump;
    int error, waitfor;

    /*
     * This work is done in two passes. The first pass grabs most
     * of the buffers and begins asynchronously writing them. The
     * only way to wait for these asynchronous writes is to sleep
     * on the filesystem vnode which may stay busy for a long time
     * if the filesystem is active. So, instead, we make a second
     * pass over the dependencies blocking on each write. In the
     * usual case we will be blocking against a write that we
     * initiated, so when it is done the dependency will have been
     * resolved. Thus the second pass is expected to end quickly.
     * We give a brief window at the top of the loop to allow
     * any pending I/O to complete.
     */
    ump = VFSTOUFS(mp);
    LOCK_OWNED(ump);
    for (error = 0, waitfor = MNT_NOWAIT; ; ) {
        if (error)
            return (error);
        FREE_LOCK(ump);
        ACQUIRE_LOCK(ump);
restart:
        if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
            return (0);
        TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
            if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
                == DEPCOMPLETE) {
                jwait(&inoref->if_list, MNT_WAIT);
                goto restart;
            }
        }
        if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
            flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
            flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
            flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
            continue;
        /*
         * If we have just completed the second (MNT_WAIT) pass, we
         * are done; otherwise begin pass 2.
         */
        if (waitfor == MNT_WAIT)
            break;
        waitfor = MNT_WAIT;
    }
    /*
     * Try freeing inodedep in case all dependencies have been removed.
     */
    if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
        (void) free_inodedep(inodedep);
    return (0);
}

/*
 * Flush an inode dependency list.
 * Called with splbio blocked.
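 *
 * Return convention, for clarity: a non-zero return means the caller
 * must rescan (we slept in jwait(), initiated a write, or lost a
 * buffer race); zero means every entry on the list was already
 * DEPCOMPLETE or the list was empty, as flush_inodedep_deps() above
 * relies upon.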
12613 */ 12614static int 12615flush_deplist(listhead, waitfor, errorp) 12616 struct allocdirectlst *listhead; 12617 int waitfor; 12618 int *errorp; 12619{ 12620 struct allocdirect *adp; 12621 struct newblk *newblk; 12622 struct ufsmount *ump; 12623 struct buf *bp; 12624 12625 if ((adp = TAILQ_FIRST(listhead)) == NULL) 12626 return (0); 12627 ump = VFSTOUFS(adp->ad_list.wk_mp); 12628 LOCK_OWNED(ump); 12629 TAILQ_FOREACH(adp, listhead, ad_next) { 12630 newblk = (struct newblk *)adp; 12631 if (newblk->nb_jnewblk != NULL) { 12632 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 12633 return (1); 12634 } 12635 if (newblk->nb_state & DEPCOMPLETE) 12636 continue; 12637 bp = newblk->nb_bmsafemap->sm_buf; 12638 bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor); 12639 if (bp == NULL) { 12640 if (waitfor == MNT_NOWAIT) 12641 continue; 12642 return (1); 12643 } 12644 FREE_LOCK(ump); 12645 if (waitfor == MNT_NOWAIT) 12646 bawrite(bp); 12647 else 12648 *errorp = bwrite(bp); 12649 ACQUIRE_LOCK(ump); 12650 return (1); 12651 } 12652 return (0); 12653} 12654 12655/* 12656 * Flush dependencies associated with an allocdirect block. 12657 */ 12658static int 12659flush_newblk_dep(vp, mp, lbn) 12660 struct vnode *vp; 12661 struct mount *mp; 12662 ufs_lbn_t lbn; 12663{ 12664 struct newblk *newblk; 12665 struct ufsmount *ump; 12666 struct bufobj *bo; 12667 struct inode *ip; 12668 struct buf *bp; 12669 ufs2_daddr_t blkno; 12670 int error; 12671 12672 error = 0; 12673 bo = &vp->v_bufobj; 12674 ip = VTOI(vp); 12675 blkno = DIP(ip, i_db[lbn]); 12676 if (blkno == 0) 12677 panic("flush_newblk_dep: Missing block"); 12678 ump = VFSTOUFS(mp); 12679 ACQUIRE_LOCK(ump); 12680 /* 12681 * Loop until all dependencies related to this block are satisfied. 12682 * We must be careful to restart after each sleep in case a write 12683 * completes some part of this process for us. 12684 */ 12685 for (;;) { 12686 if (newblk_lookup(mp, blkno, 0, &newblk) == 0) { 12687 FREE_LOCK(ump); 12688 break; 12689 } 12690 if (newblk->nb_list.wk_type != D_ALLOCDIRECT) 12691 panic("flush_newblk_deps: Bad newblk %p", newblk); 12692 /* 12693 * Flush the journal. 12694 */ 12695 if (newblk->nb_jnewblk != NULL) { 12696 jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT); 12697 continue; 12698 } 12699 /* 12700 * Write the bitmap dependency. 12701 */ 12702 if ((newblk->nb_state & DEPCOMPLETE) == 0) { 12703 bp = newblk->nb_bmsafemap->sm_buf; 12704 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); 12705 if (bp == NULL) 12706 continue; 12707 FREE_LOCK(ump); 12708 error = bwrite(bp); 12709 if (error) 12710 break; 12711 ACQUIRE_LOCK(ump); 12712 continue; 12713 } 12714 /* 12715 * Write the buffer. 12716 */ 12717 FREE_LOCK(ump); 12718 BO_LOCK(bo); 12719 bp = gbincore(bo, lbn); 12720 if (bp != NULL) { 12721 error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | 12722 LK_INTERLOCK, BO_LOCKPTR(bo)); 12723 if (error == ENOLCK) { 12724 ACQUIRE_LOCK(ump); 12725 continue; /* Slept, retry */ 12726 } 12727 if (error != 0) 12728 break; /* Failed */ 12729 if (bp->b_flags & B_DELWRI) { 12730 bremfree(bp); 12731 error = bwrite(bp); 12732 if (error) 12733 break; 12734 } else 12735 BUF_UNLOCK(bp); 12736 } else 12737 BO_UNLOCK(bo); 12738 /* 12739 * We have to wait for the direct pointers to 12740 * point at the newdirblk before the dependency 12741 * will go away. 12742 */ 12743 error = ffs_update(vp, 1); 12744 if (error) 12745 break; 12746 ACQUIRE_LOCK(ump); 12747 } 12748 return (error); 12749} 12750 12751/* 12752 * Eliminate a pagedep dependency by flushing out all its diradd dependencies. 
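 * For illustration, the D_PAGEDEP case of softdep_sync_buf() above
 * invokes this routine once per non-empty diradd hash bucket:
 *
 *      error = flush_pagedep_deps(vp, wk->wk_mp, &pagedep->pd_diraddhd[i]);
 *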
12753 * Called with splbio blocked. 12754 */ 12755static int 12756flush_pagedep_deps(pvp, mp, diraddhdp) 12757 struct vnode *pvp; 12758 struct mount *mp; 12759 struct diraddhd *diraddhdp; 12760{ 12761 struct inodedep *inodedep; 12762 struct inoref *inoref; 12763 struct ufsmount *ump; 12764 struct diradd *dap; 12765 struct vnode *vp; 12766 int error = 0; 12767 struct buf *bp; 12768 ino_t inum; 12769 struct diraddhd unfinished; 12770 12771 LIST_INIT(&unfinished); 12772 ump = VFSTOUFS(mp); 12773 LOCK_OWNED(ump); 12774restart: 12775 while ((dap = LIST_FIRST(diraddhdp)) != NULL) { 12776 /* 12777 * Flush ourselves if this directory entry 12778 * has a MKDIR_PARENT dependency. 12779 */ 12780 if (dap->da_state & MKDIR_PARENT) { 12781 FREE_LOCK(ump); 12782 if ((error = ffs_update(pvp, 1)) != 0) 12783 break; 12784 ACQUIRE_LOCK(ump); 12785 /* 12786 * If that cleared dependencies, go on to next. 12787 */ 12788 if (dap != LIST_FIRST(diraddhdp)) 12789 continue; 12790 /* 12791 * All MKDIR_PARENT dependencies and all the 12792 * NEWBLOCK pagedeps that are contained in direct 12793 * blocks were resolved by doing above ffs_update. 12794 * Pagedeps contained in indirect blocks may 12795 * require a complete sync'ing of the directory. 12796 * We are in the midst of doing a complete sync, 12797 * so if they are not resolved in this pass we 12798 * defer them for now as they will be sync'ed by 12799 * our caller shortly. 12800 */ 12801 LIST_REMOVE(dap, da_pdlist); 12802 LIST_INSERT_HEAD(&unfinished, dap, da_pdlist); 12803 continue; 12804 } 12805 /* 12806 * A newly allocated directory must have its "." and 12807 * ".." entries written out before its name can be 12808 * committed in its parent. 12809 */ 12810 inum = dap->da_newinum; 12811 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) 12812 panic("flush_pagedep_deps: lost inode1"); 12813 /* 12814 * Wait for any pending journal adds to complete so we don't 12815 * cause rollbacks while syncing. 12816 */ 12817 TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) { 12818 if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY)) 12819 == DEPCOMPLETE) { 12820 jwait(&inoref->if_list, MNT_WAIT); 12821 goto restart; 12822 } 12823 } 12824 if (dap->da_state & MKDIR_BODY) { 12825 FREE_LOCK(ump); 12826 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, 12827 FFSV_FORCEINSMQ))) 12828 break; 12829 error = flush_newblk_dep(vp, mp, 0); 12830 /* 12831 * If we still have the dependency we might need to 12832 * update the vnode to sync the new link count to 12833 * disk. 12834 */ 12835 if (error == 0 && dap == LIST_FIRST(diraddhdp)) 12836 error = ffs_update(vp, 1); 12837 vput(vp); 12838 if (error != 0) 12839 break; 12840 ACQUIRE_LOCK(ump); 12841 /* 12842 * If that cleared dependencies, go on to next. 12843 */ 12844 if (dap != LIST_FIRST(diraddhdp)) 12845 continue; 12846 if (dap->da_state & MKDIR_BODY) { 12847 inodedep_lookup(UFSTOVFS(ump), inum, 0, 12848 &inodedep); 12849 panic("flush_pagedep_deps: MKDIR_BODY " 12850 "inodedep %p dap %p vp %p", 12851 inodedep, dap, vp); 12852 } 12853 } 12854 /* 12855 * Flush the inode on which the directory entry depends. 12856 * Having accounted for MKDIR_PARENT and MKDIR_BODY above, 12857 * the only remaining dependency is that the updated inode 12858 * count must get pushed to disk. The inode has already 12859 * been pushed into its inode buffer (via VOP_UPDATE) at 12860 * the time of the reference count change. 
So we need only 12861 * locate that buffer, ensure that there will be no rollback 12862 * caused by a bitmap dependency, then write the inode buffer. 12863 */ 12864retry: 12865 if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0) 12866 panic("flush_pagedep_deps: lost inode"); 12867 /* 12868 * If the inode still has bitmap dependencies, 12869 * push them to disk. 12870 */ 12871 if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) { 12872 bp = inodedep->id_bmsafemap->sm_buf; 12873 bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT); 12874 if (bp == NULL) 12875 goto retry; 12876 FREE_LOCK(ump); 12877 if ((error = bwrite(bp)) != 0) 12878 break; 12879 ACQUIRE_LOCK(ump); 12880 if (dap != LIST_FIRST(diraddhdp)) 12881 continue; 12882 } 12883 /* 12884 * If the inode is still sitting in a buffer waiting 12885 * to be written or waiting for the link count to be 12886 * adjusted update it here to flush it to disk. 12887 */ 12888 if (dap == LIST_FIRST(diraddhdp)) { 12889 FREE_LOCK(ump); 12890 if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp, 12891 FFSV_FORCEINSMQ))) 12892 break; 12893 error = ffs_update(vp, 1); 12894 vput(vp); 12895 if (error) 12896 break; 12897 ACQUIRE_LOCK(ump); 12898 } 12899 /* 12900 * If we have failed to get rid of all the dependencies 12901 * then something is seriously wrong. 12902 */ 12903 if (dap == LIST_FIRST(diraddhdp)) { 12904 inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep); 12905 panic("flush_pagedep_deps: failed to flush " 12906 "inodedep %p ino %ju dap %p", 12907 inodedep, (uintmax_t)inum, dap); 12908 } 12909 } 12910 if (error) 12911 ACQUIRE_LOCK(ump); 12912 while ((dap = LIST_FIRST(&unfinished)) != NULL) { 12913 LIST_REMOVE(dap, da_pdlist); 12914 LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist); 12915 } 12916 return (error); 12917} 12918 12919/* 12920 * A large burst of file addition or deletion activity can drive the 12921 * memory load excessively high. First attempt to slow things down 12922 * using the techniques below. If that fails, this routine requests 12923 * the offending operations to fall back to running synchronously 12924 * until the memory load returns to a reasonable level. 12925 */ 12926int 12927softdep_slowdown(vp) 12928 struct vnode *vp; 12929{ 12930 struct ufsmount *ump; 12931 int jlow; 12932 int max_softdeps_hard; 12933 12934 KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0, 12935 ("softdep_slowdown called on non-softdep filesystem")); 12936 ump = VFSTOUFS(vp->v_mount); 12937 ACQUIRE_LOCK(ump); 12938 jlow = 0; 12939 /* 12940 * Check for journal space if needed. 12941 */ 12942 if (DOINGSUJ(vp)) { 12943 if (journal_space(ump, 0) == 0) 12944 jlow = 1; 12945 } 12946 max_softdeps_hard = max_softdeps * 11 / 10; 12947 if (dep_current[D_DIRREM] < max_softdeps_hard / 2 && 12948 dep_current[D_INODEDEP] < max_softdeps_hard && 12949 VFSTOUFS(vp->v_mount)->softdep_numindirdeps < maxindirdeps && 12950 dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0) { 12951 FREE_LOCK(ump); 12952 return (0); 12953 } 12954 if (VFSTOUFS(vp->v_mount)->softdep_numindirdeps >= maxindirdeps || jlow) 12955 softdep_speedup(); 12956 stat_sync_limit_hit += 1; 12957 FREE_LOCK(ump); 12958 if (DOINGSUJ(vp)) 12959 return (0); 12960 return (1); 12961} 12962 12963/* 12964 * Called by the allocation routines when they are about to fail 12965 * in the hope that we can free up the requested resource (inodes 12966 * or disk space). 12967 * 12968 * First check to see if the work list has anything on it. If it has, 12969 * clean up entries until we successfully free the requested resource. 
 * Because this process holds inodes locked, we cannot handle any remove
 * requests that might block on a locked inode as that could lead to
 * deadlock. If the worklist yields none of the requested resource,
 * start syncing out vnodes to free up the needed space.
 */
int
softdep_request_cleanup(fs, vp, cred, resource)
    struct fs *fs;
    struct vnode *vp;
    struct ucred *cred;
    int resource;
{
    struct ufsmount *ump;
    struct mount *mp;
    struct vnode *lvp, *mvp;
    long starttime;
    ufs2_daddr_t needed;
    int error;

    /*
     * If we are being called because of a process doing a
     * copy-on-write, then it is not safe to process any
     * worklist items as we will recurse into the copyonwrite
     * routine. This will result in an incoherent snapshot.
     * If the vnode that we hold is a snapshot, we must avoid
     * handling other resources that could cause deadlock.
     */
    if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
        return (0);

    if (resource == FLUSH_BLOCKS_WAIT)
        stat_cleanup_blkrequests += 1;
    else
        stat_cleanup_inorequests += 1;

    mp = vp->v_mount;
    ump = VFSTOUFS(mp);
    mtx_assert(UFS_MTX(ump), MA_OWNED);
    UFS_UNLOCK(ump);
    error = ffs_update(vp, 1);
    if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
        UFS_LOCK(ump);
        return (0);
    }
    /*
     * If we are in need of resources, consider pausing for
     * tickdelay to give ourselves some breathing room.
     */
    ACQUIRE_LOCK(ump);
    process_removes(vp);
    process_truncates(vp);
    request_cleanup(UFSTOVFS(ump), resource);
    FREE_LOCK(ump);
    /*
     * Now clean up at least as many resources as we will need.
     *
     * When requested to clean up inodes, the number that are needed
     * is set by the number of simultaneous writers (mnt_writeopcount)
     * plus a bit of slop (2) in case some more writers show up while
     * we are cleaning.
     *
     * When requested to free up space, the amount of space that
     * we need is enough blocks to allocate a full-sized segment
     * (fs_contigsumsize). The number of such segments that will
     * be needed is set by the number of simultaneous writers
     * (mnt_writeopcount) plus a bit of slop (2) in case some more
     * writers show up while we are cleaning.
     *
     * Additionally, if we are unprivileged and allocating space,
     * we need to ensure that we clean up enough blocks to get the
     * needed number of blocks over the threshold of the minimum
     * number of blocks required to be kept free by the filesystem
     * (fs_minfree).
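     *
     * For illustration only, with hypothetical numbers: eight
     * simultaneous writers and an fs_contigsumsize of 16 make a
     * FLUSH_BLOCKS_WAIT request aim for needed = (8 + 2) * 16 = 160
     * free blocks, per the computation below.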
     */
    if (resource == FLUSH_INODES_WAIT) {
        needed = vp->v_mount->mnt_writeopcount + 2;
    } else if (resource == FLUSH_BLOCKS_WAIT) {
        needed = (vp->v_mount->mnt_writeopcount + 2) *
            fs->fs_contigsumsize;
        if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
            needed += fragstoblks(fs,
                roundup((fs->fs_dsize * fs->fs_minfree / 100) -
                fs->fs_cstotal.cs_nffree, fs->fs_frag));
    } else {
        UFS_LOCK(ump);
        printf("softdep_request_cleanup: Unknown resource type %d\n",
            resource);
        return (0);
    }
    starttime = time_second;
retry:
    if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
        fs->fs_cstotal.cs_nbfree <= needed) ||
        (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
        fs->fs_cstotal.cs_nifree <= needed)) {
        ACQUIRE_LOCK(ump);
        if (ump->softdep_on_worklist > 0 &&
            process_worklist_item(UFSTOVFS(ump),
            ump->softdep_on_worklist, LK_NOWAIT) != 0)
            stat_worklist_push += 1;
        FREE_LOCK(ump);
    }
    /*
     * If we still need resources and there are no more worklist
     * entries to process to obtain them, we have to start flushing
     * the dirty vnodes to force the release of additional requests
     * to the worklist that we can then process to reap additional
     * resources. We walk the vnodes associated with the mount point
     * until we get the needed worklist requests that we can reap.
     */
    if ((resource == FLUSH_BLOCKS_WAIT &&
        fs->fs_cstotal.cs_nbfree <= needed) ||
        (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
        fs->fs_cstotal.cs_nifree <= needed)) {
        MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
            if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
                VI_UNLOCK(lvp);
                continue;
            }
            if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
                curthread))
                continue;
            if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
                vput(lvp);
                continue;
            }
            (void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
            vput(lvp);
        }
        lvp = ump->um_devvp;
        if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
            VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
            VOP_UNLOCK(lvp, 0);
        }
        if (ump->softdep_on_worklist > 0) {
            stat_cleanup_retries += 1;
            goto retry;
        }
        stat_cleanup_failures += 1;
    }
    if (time_second - starttime > stat_cleanup_high_delay)
        stat_cleanup_high_delay = time_second - starttime;
    UFS_LOCK(ump);
    return (1);
}

/*
 * If memory utilization has gotten too high, deliberately slow things
 * down and speed up the I/O processing.
 */
static int
request_cleanup(mp, resource)
    struct mount *mp;
    int resource;
{
    struct thread *td = curthread;
    struct ufsmount *ump;

    ump = VFSTOUFS(mp);
    LOCK_OWNED(ump);
    /*
     * We never hold up the filesystem syncer or buf daemon.
     */
    if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
        return (0);
    /*
     * First check to see if the work list has gotten backlogged.
     * If it has, co-opt this process to help clean up two entries.
     * Because this process may hold inodes locked, we cannot
     * handle any remove requests that might block on a locked
     * inode as that could lead to deadlock. We set TDP_SOFTDEP
     * to avoid recursively processing the worklist.
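     *
     * (Hypothetical numbers for illustration: if max_softdeps were
     * 4000, a backlog of more than 400 worklist items would take
     * this path.)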
13142 */ 13143 if (ump->softdep_on_worklist > max_softdeps / 10) { 13144 td->td_pflags |= TDP_SOFTDEP; 13145 process_worklist_item(mp, 2, LK_NOWAIT); 13146 td->td_pflags &= ~TDP_SOFTDEP; 13147 stat_worklist_push += 2; 13148 return(1); 13149 } 13150 /* 13151 * Next, we attempt to speed up the syncer process. If that 13152 * is successful, then we allow the process to continue. 13153 */ 13154 if (softdep_speedup() && 13155 resource != FLUSH_BLOCKS_WAIT && 13156 resource != FLUSH_INODES_WAIT) 13157 return(0); 13158 /* 13159 * If we are resource constrained on inode dependencies, try 13160 * flushing some dirty inodes. Otherwise, we are constrained 13161 * by file deletions, so try accelerating flushes of directories 13162 * with removal dependencies. We would like to do the cleanup 13163 * here, but we probably hold an inode locked at this point and 13164 * that might deadlock against one that we try to clean. So, 13165 * the best that we can do is request the syncer daemon to do 13166 * the cleanup for us. 13167 */ 13168 switch (resource) { 13169 13170 case FLUSH_INODES: 13171 case FLUSH_INODES_WAIT: 13172 stat_ino_limit_push += 1; 13173 req_clear_inodedeps += 1; 13174 stat_countp = &stat_ino_limit_hit; 13175 break; 13176 13177 case FLUSH_BLOCKS: 13178 case FLUSH_BLOCKS_WAIT: 13179 stat_blk_limit_push += 1; 13180 req_clear_remove += 1; 13181 stat_countp = &stat_blk_limit_hit; 13182 break; 13183 13184 default: 13185 panic("request_cleanup: unknown type"); 13186 } 13187 /* 13188 * Hopefully the syncer daemon will catch up and awaken us. 13189 * We wait at most tickdelay before proceeding in any case. 13190 */ 13191 proc_waiting += 1; 13192 if (callout_pending(&softdep_callout) == FALSE) 13193 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2, 13194 pause_timer, 0); 13195 13196 msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0); 13197 proc_waiting -= 1; 13198 return (1); 13199} 13200 13201/* 13202 * Awaken processes pausing in request_cleanup and clear proc_waiting 13203 * to indicate that there is no longer a timer running. Pause_timer 13204 * will be called with the global softdep mutex (&lk) locked. 13205 */ 13206static void 13207pause_timer(arg) 13208 void *arg; 13209{ 13210 13211 rw_assert(&lk, RA_WLOCKED); 13212 /* 13213 * The callout_ API has acquired mtx and will hold it around this 13214 * function call. 13215 */ 13216 *stat_countp += 1; 13217 wakeup_one(&proc_waiting); 13218 if (proc_waiting > 0) 13219 callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2, 13220 pause_timer, 0); 13221} 13222 13223/* 13224 * If requested, try removing inode or removal dependencies. 13225 */ 13226static void 13227check_clear_deps(mp) 13228 struct mount *mp; 13229{ 13230 13231 rw_assert(&lk, RA_WLOCKED); 13232 /* 13233 * If we are suspended, it may be because of our using 13234 * too many inodedeps, so help clear them out. 13235 */ 13236 if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended) 13237 clear_inodedeps(mp); 13238 /* 13239 * General requests for cleanup of backed up dependencies 13240 */ 13241 if (req_clear_inodedeps) { 13242 req_clear_inodedeps -= 1; 13243 clear_inodedeps(mp); 13244 wakeup_one(&proc_waiting); 13245 } 13246 if (req_clear_remove) { 13247 req_clear_remove -= 1; 13248 clear_remove(mp); 13249 wakeup_one(&proc_waiting); 13250 } 13251} 13252 13253/* 13254 * Flush out a directory with at least one removal dependency in an effort to 13255 * reduce the number of dirrem, freefile, and freeblks dependency structures. 
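 * The scan below resumes where the previous call left off, via
 * ump->pagedep_nextclean, so successive calls rotate through the
 * pagedep hash table rather than always flushing the same directory.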
13256 */ 13257static void 13258clear_remove(mp) 13259 struct mount *mp; 13260{ 13261 struct pagedep_hashhead *pagedephd; 13262 struct pagedep *pagedep; 13263 struct ufsmount *ump; 13264 struct vnode *vp; 13265 struct bufobj *bo; 13266 int error, cnt; 13267 ino_t ino; 13268 13269 ump = VFSTOUFS(mp); 13270 LOCK_OWNED(ump); 13271 13272 for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) { 13273 pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++]; 13274 if (ump->pagedep_nextclean > ump->pagedep_hash_size) 13275 ump->pagedep_nextclean = 0; 13276 LIST_FOREACH(pagedep, pagedephd, pd_hash) { 13277 if (LIST_EMPTY(&pagedep->pd_dirremhd)) 13278 continue; 13279 ino = pagedep->pd_ino; 13280 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13281 continue; 13282 FREE_LOCK(ump); 13283 13284 /* 13285 * Let unmount clear deps 13286 */ 13287 error = vfs_busy(mp, MBF_NOWAIT); 13288 if (error != 0) 13289 goto finish_write; 13290 error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13291 FFSV_FORCEINSMQ); 13292 vfs_unbusy(mp); 13293 if (error != 0) { 13294 softdep_error("clear_remove: vget", error); 13295 goto finish_write; 13296 } 13297 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13298 softdep_error("clear_remove: fsync", error); 13299 bo = &vp->v_bufobj; 13300 BO_LOCK(bo); 13301 drain_output(vp); 13302 BO_UNLOCK(bo); 13303 vput(vp); 13304 finish_write: 13305 vn_finished_write(mp); 13306 ACQUIRE_LOCK(ump); 13307 return; 13308 } 13309 } 13310} 13311 13312/* 13313 * Clear out a block of dirty inodes in an effort to reduce 13314 * the number of inodedep dependency structures. 13315 */ 13316static void 13317clear_inodedeps(mp) 13318 struct mount *mp; 13319{ 13320 struct inodedep_hashhead *inodedephd; 13321 struct inodedep *inodedep; 13322 struct ufsmount *ump; 13323 struct vnode *vp; 13324 struct fs *fs; 13325 int error, cnt; 13326 ino_t firstino, lastino, ino; 13327 13328 ump = VFSTOUFS(mp); 13329 fs = ump->um_fs; 13330 LOCK_OWNED(ump); 13331 /* 13332 * Pick a random inode dependency to be cleared. 13333 * We will then gather up all the inodes in its block 13334 * that have dependencies and flush them out. 13335 */ 13336 for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) { 13337 inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++]; 13338 if (ump->inodedep_nextclean > ump->inodedep_hash_size) 13339 ump->inodedep_nextclean = 0; 13340 if ((inodedep = LIST_FIRST(inodedephd)) != NULL) 13341 break; 13342 } 13343 if (inodedep == NULL) 13344 return; 13345 /* 13346 * Find the last inode in the block with dependencies. 13347 */ 13348 firstino = inodedep->id_ino & ~(INOPB(fs) - 1); 13349 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--) 13350 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0) 13351 break; 13352 /* 13353 * Asynchronously push all but the last inode with dependencies. 13354 * Synchronously push the last inode with dependencies to ensure 13355 * that the inode block gets written to free up the inodedeps. 
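     *
     * For illustration only, with hypothetical numbers: an INOPB(fs)
     * of 64 and an id_ino of 1000 give firstino = 1000 & ~63 = 960,
     * so the loop below considers inodes 960 through 1023.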
13356 */ 13357 for (ino = firstino; ino <= lastino; ino++) { 13358 if (inodedep_lookup(mp, ino, 0, &inodedep) == 0) 13359 continue; 13360 if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) 13361 continue; 13362 FREE_LOCK(ump); 13363 error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */ 13364 if (error != 0) { 13365 vn_finished_write(mp); 13366 ACQUIRE_LOCK(ump); 13367 return; 13368 } 13369 if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 13370 FFSV_FORCEINSMQ)) != 0) { 13371 softdep_error("clear_inodedeps: vget", error); 13372 vfs_unbusy(mp); 13373 vn_finished_write(mp); 13374 ACQUIRE_LOCK(ump); 13375 return; 13376 } 13377 vfs_unbusy(mp); 13378 if (ino == lastino) { 13379 if ((error = ffs_syncvnode(vp, MNT_WAIT, 0))) 13380 softdep_error("clear_inodedeps: fsync1", error); 13381 } else { 13382 if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0))) 13383 softdep_error("clear_inodedeps: fsync2", error); 13384 BO_LOCK(&vp->v_bufobj); 13385 drain_output(vp); 13386 BO_UNLOCK(&vp->v_bufobj); 13387 } 13388 vput(vp); 13389 vn_finished_write(mp); 13390 ACQUIRE_LOCK(ump); 13391 } 13392} 13393 13394void 13395softdep_buf_append(bp, wkhd) 13396 struct buf *bp; 13397 struct workhead *wkhd; 13398{ 13399 struct worklist *wk; 13400 struct ufsmount *ump; 13401 13402 if ((wk = LIST_FIRST(wkhd)) == NULL) 13403 return; 13404 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 13405 ("softdep_buf_append called on non-softdep filesystem")); 13406 ump = VFSTOUFS(wk->wk_mp); 13407 ACQUIRE_LOCK(ump); 13408 while ((wk = LIST_FIRST(wkhd)) != NULL) { 13409 WORKLIST_REMOVE(wk); 13410 WORKLIST_INSERT(&bp->b_dep, wk); 13411 } 13412 FREE_LOCK(ump); 13413 13414} 13415 13416void 13417softdep_inode_append(ip, cred, wkhd) 13418 struct inode *ip; 13419 struct ucred *cred; 13420 struct workhead *wkhd; 13421{ 13422 struct buf *bp; 13423 struct fs *fs; 13424 int error; 13425 13426 KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0, 13427 ("softdep_inode_append called on non-softdep filesystem")); 13428 fs = ip->i_fs; 13429 error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)), 13430 (int)fs->fs_bsize, cred, &bp); 13431 if (error) { 13432 bqrelse(bp); 13433 softdep_freework(wkhd); 13434 return; 13435 } 13436 softdep_buf_append(bp, wkhd); 13437 bqrelse(bp); 13438} 13439 13440void 13441softdep_freework(wkhd) 13442 struct workhead *wkhd; 13443{ 13444 struct worklist *wk; 13445 struct ufsmount *ump; 13446 13447 if ((wk = LIST_FIRST(wkhd)) == NULL) 13448 return; 13449 KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0, 13450 ("softdep_freework called on non-softdep filesystem")); 13451 ump = VFSTOUFS(wk->wk_mp); 13452 ACQUIRE_LOCK(ump); 13453 handle_jwork(wkhd); 13454 FREE_LOCK(ump); 13455} 13456 13457/* 13458 * Function to determine if the buffer has outstanding dependencies 13459 * that will cause a roll-back if the buffer is written. If wantcount 13460 * is set, return number of dependencies, otherwise just yes or no. 
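 *
 * For example, softdep_sync_buf() above uses the boolean form to
 * decide whether an MNT_NOWAIT sync of a device buffer must fail:
 *
 *      if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
 *              return (EBUSY);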
13461 */ 13462static int 13463softdep_count_dependencies(bp, wantcount) 13464 struct buf *bp; 13465 int wantcount; 13466{ 13467 struct worklist *wk; 13468 struct ufsmount *ump; 13469 struct bmsafemap *bmsafemap; 13470 struct freework *freework; 13471 struct inodedep *inodedep; 13472 struct indirdep *indirdep; 13473 struct freeblks *freeblks; 13474 struct allocindir *aip; 13475 struct pagedep *pagedep; 13476 struct dirrem *dirrem; 13477 struct newblk *newblk; 13478 struct mkdir *mkdir; 13479 struct diradd *dap; 13480 int i, retval; 13481 13482 retval = 0; 13483 if ((wk = LIST_FIRST(&bp->b_dep)) == NULL) 13484 return (0); 13485 ump = VFSTOUFS(wk->wk_mp); 13486 ACQUIRE_LOCK(ump); 13487 LIST_FOREACH(wk, &bp->b_dep, wk_list) { 13488 switch (wk->wk_type) { 13489 13490 case D_INODEDEP: 13491 inodedep = WK_INODEDEP(wk); 13492 if ((inodedep->id_state & DEPCOMPLETE) == 0) { 13493 /* bitmap allocation dependency */ 13494 retval += 1; 13495 if (!wantcount) 13496 goto out; 13497 } 13498 if (TAILQ_FIRST(&inodedep->id_inoupdt)) { 13499 /* direct block pointer dependency */ 13500 retval += 1; 13501 if (!wantcount) 13502 goto out; 13503 } 13504 if (TAILQ_FIRST(&inodedep->id_extupdt)) { 13505 /* direct block pointer dependency */ 13506 retval += 1; 13507 if (!wantcount) 13508 goto out; 13509 } 13510 if (TAILQ_FIRST(&inodedep->id_inoreflst)) { 13511 /* Add reference dependency. */ 13512 retval += 1; 13513 if (!wantcount) 13514 goto out; 13515 } 13516 continue; 13517 13518 case D_INDIRDEP: 13519 indirdep = WK_INDIRDEP(wk); 13520 13521 TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) { 13522 /* indirect truncation dependency */ 13523 retval += 1; 13524 if (!wantcount) 13525 goto out; 13526 } 13527 13528 LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) { 13529 /* indirect block pointer dependency */ 13530 retval += 1; 13531 if (!wantcount) 13532 goto out; 13533 } 13534 continue; 13535 13536 case D_PAGEDEP: 13537 pagedep = WK_PAGEDEP(wk); 13538 LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) { 13539 if (LIST_FIRST(&dirrem->dm_jremrefhd)) { 13540 /* Journal remove ref dependency. */ 13541 retval += 1; 13542 if (!wantcount) 13543 goto out; 13544 } 13545 } 13546 for (i = 0; i < DAHASHSZ; i++) { 13547 13548 LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) { 13549 /* directory entry dependency */ 13550 retval += 1; 13551 if (!wantcount) 13552 goto out; 13553 } 13554 } 13555 continue; 13556 13557 case D_BMSAFEMAP: 13558 bmsafemap = WK_BMSAFEMAP(wk); 13559 if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) { 13560 /* Add reference dependency. */ 13561 retval += 1; 13562 if (!wantcount) 13563 goto out; 13564 } 13565 if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) { 13566 /* Allocate block dependency. */ 13567 retval += 1; 13568 if (!wantcount) 13569 goto out; 13570 } 13571 continue; 13572 13573 case D_FREEBLKS: 13574 freeblks = WK_FREEBLKS(wk); 13575 if (LIST_FIRST(&freeblks->fb_jblkdephd)) { 13576 /* Freeblk journal dependency. */ 13577 retval += 1; 13578 if (!wantcount) 13579 goto out; 13580 } 13581 continue; 13582 13583 case D_ALLOCDIRECT: 13584 case D_ALLOCINDIR: 13585 newblk = WK_NEWBLK(wk); 13586 if (newblk->nb_jnewblk) { 13587 /* Journal allocate dependency. */ 13588 retval += 1; 13589 if (!wantcount) 13590 goto out; 13591 } 13592 continue; 13593 13594 case D_MKDIR: 13595 mkdir = WK_MKDIR(wk); 13596 if (mkdir->md_jaddref) { 13597 /* Journal reference dependency. 
                 */
                retval += 1;
                if (!wantcount)
                    goto out;
            }
            continue;

        case D_FREEWORK:
        case D_FREEDEP:
        case D_JSEGDEP:
        case D_JSEG:
        case D_SBDEP:
            /* never a dependency on these blocks */
            continue;

        default:
            panic("softdep_count_dependencies: Unexpected type %s",
                TYPENAME(wk->wk_type));
            /* NOTREACHED */
        }
    }
out:
    FREE_LOCK(ump);
    return (retval);
}

/*
 * Acquire exclusive access to a buffer.
 * Must be called with a locked mtx parameter.
 * Return acquired buffer or NULL on failure.
 */
static struct buf *
getdirtybuf(bp, lock, waitfor)
    struct buf *bp;
    struct rwlock *lock;
    int waitfor;
{
    int error;

    if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
        if (waitfor != MNT_WAIT)
            return (NULL);
        error = BUF_LOCK(bp,
            LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
        /*
         * Even if we successfully acquire bp here, we have dropped
         * lock, which may violate our guarantee.
         */
        if (error == 0)
            BUF_UNLOCK(bp);
        else if (error != ENOLCK)
            panic("getdirtybuf: inconsistent lock: %d", error);
        rw_wlock(lock);
        return (NULL);
    }
    if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
        if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
            rw_wunlock(lock);
            BO_LOCK(bp->b_bufobj);
            BUF_UNLOCK(bp);
            if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
                bp->b_vflags |= BV_BKGRDWAIT;
                msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
                    PRIBIO | PDROP, "getbuf", 0);
            } else
                BO_UNLOCK(bp->b_bufobj);
            rw_wlock(lock);
            return (NULL);
        }
        BUF_UNLOCK(bp);
        if (waitfor != MNT_WAIT)
            return (NULL);
        /*
         * The lock argument must be bp->b_vp's mutex in
         * this case.
         */
#ifdef DEBUG_VFS_LOCKS
        if (bp->b_vp->v_type != VCHR)
            ASSERT_BO_WLOCKED(bp->b_bufobj);
#endif
        bp->b_vflags |= BV_BKGRDWAIT;
        rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
        return (NULL);
    }
    if ((bp->b_flags & B_DELWRI) == 0) {
        BUF_UNLOCK(bp);
        return (NULL);
    }
    bremfree(bp);
    return (bp);
}


/*
 * Check if it is safe to suspend the file system now. On entry,
 * the vnode interlock for devvp should be held. Return 0 with
 * the mount interlock held if the file system can be suspended now,
 * otherwise return EAGAIN with the mount interlock held.
 */
int
softdep_check_suspend(struct mount *mp,
    struct vnode *devvp,
    int softdep_depcnt,
    int softdep_accdepcnt,
    int secondary_writes,
    int secondary_accwrites)
{
    struct bufobj *bo;
    struct ufsmount *ump;
    int error;

    bo = &devvp->v_bufobj;
    ASSERT_BO_WLOCKED(bo);

    /*
     * If we are not running with soft updates, then we need only
     * deal with secondary writes as we try to suspend.
     */
    if (MOUNTEDSOFTDEP(mp) == 0) {
        MNT_ILOCK(mp);
        while (mp->mnt_secondary_writes != 0) {
            BO_UNLOCK(bo);
            msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
                (PUSER - 1) | PDROP, "secwr", 0);
            BO_LOCK(bo);
            MNT_ILOCK(mp);
        }

        /*
         * Reasons for needing more work before suspend:
         * - Dirty buffers on devvp.
13728 * - Secondary writes occurred after start of vnode sync loop 13729 */ 13730 error = 0; 13731 if (bo->bo_numoutput > 0 || 13732 bo->bo_dirty.bv_cnt > 0 || 13733 secondary_writes != 0 || 13734 mp->mnt_secondary_writes != 0 || 13735 secondary_accwrites != mp->mnt_secondary_accwrites) 13736 error = EAGAIN; 13737 BO_UNLOCK(bo); 13738 return (error); 13739 } 13740 13741 /* 13742 * If we are running with soft updates, then we need to coordinate 13743 * with them as we try to suspend. 13744 */ 13745 ump = VFSTOUFS(mp); 13746 for (;;) { 13747 if (!TRY_ACQUIRE_LOCK(ump)) { 13748 BO_UNLOCK(bo); 13749 ACQUIRE_LOCK(ump); 13750 FREE_LOCK(ump); 13751 BO_LOCK(bo); 13752 continue; 13753 } 13754 MNT_ILOCK(mp); 13755 if (mp->mnt_secondary_writes != 0) { 13756 FREE_LOCK(ump); 13757 BO_UNLOCK(bo); 13758 msleep(&mp->mnt_secondary_writes, 13759 MNT_MTX(mp), 13760 (PUSER - 1) | PDROP, "secwr", 0); 13761 BO_LOCK(bo); 13762 continue; 13763 } 13764 break; 13765 } 13766 13767 /* 13768 * Reasons for needing more work before suspend: 13769 * - Dirty buffers on devvp. 13770 * - Softdep activity occurred after start of vnode sync loop 13771 * - Secondary writes occurred after start of vnode sync loop 13772 */ 13773 error = 0; 13774 if (bo->bo_numoutput > 0 || 13775 bo->bo_dirty.bv_cnt > 0 || 13776 softdep_depcnt != 0 || 13777 ump->softdep_deps != 0 || 13778 softdep_accdepcnt != ump->softdep_accdeps || 13779 secondary_writes != 0 || 13780 mp->mnt_secondary_writes != 0 || 13781 secondary_accwrites != mp->mnt_secondary_accwrites) 13782 error = EAGAIN; 13783 FREE_LOCK(ump); 13784 BO_UNLOCK(bo); 13785 return (error); 13786} 13787 13788 13789/* 13790 * Get the number of dependency structures for the file system, both 13791 * the current number and the total number allocated. These will 13792 * later be used to detect that softdep processing has occurred. 13793 */ 13794void 13795softdep_get_depcounts(struct mount *mp, 13796 int *softdep_depsp, 13797 int *softdep_accdepsp) 13798{ 13799 struct ufsmount *ump; 13800 13801 if (MOUNTEDSOFTDEP(mp) == 0) { 13802 *softdep_depsp = 0; 13803 *softdep_accdepsp = 0; 13804 return; 13805 } 13806 ump = VFSTOUFS(mp); 13807 ACQUIRE_LOCK(ump); 13808 *softdep_depsp = ump->softdep_deps; 13809 *softdep_accdepsp = ump->softdep_accdeps; 13810 FREE_LOCK(ump); 13811} 13812 13813/* 13814 * Wait for pending output on a vnode to complete. 13815 * Must be called with vnode lock and interlock locked. 13816 * 13817 * XXX: Should just be a call to bufobj_wwait(). 13818 */ 13819static void 13820drain_output(vp) 13821 struct vnode *vp; 13822{ 13823 struct bufobj *bo; 13824 13825 bo = &vp->v_bufobj; 13826 ASSERT_VOP_LOCKED(vp, "drain_output"); 13827 ASSERT_BO_WLOCKED(bo); 13828 13829 while (bo->bo_numoutput) { 13830 bo->bo_flag |= BO_WWAIT; 13831 msleep((caddr_t)&bo->bo_numoutput, 13832 BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0); 13833 } 13834} 13835 13836/* 13837 * Called whenever a buffer that is being invalidated or reallocated 13838 * contains dependencies. This should only happen if an I/O error has 13839 * occurred. The routine is called with the buffer locked. 
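 * (A b_error of ENXIO, meaning the underlying device disappeared, is
 * tolerated below; any other unrecovered I/O error causes a panic.)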
13840 */ 13841static void 13842softdep_deallocate_dependencies(bp) 13843 struct buf *bp; 13844{ 13845 13846 if ((bp->b_ioflags & BIO_ERROR) == 0) 13847 panic("softdep_deallocate_dependencies: dangling deps"); 13848 if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL) 13849 softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error); 13850 else 13851 printf("softdep_deallocate_dependencies: " 13852 "got error %d while accessing filesystem\n", bp->b_error); 13853 if (bp->b_error != ENXIO) 13854 panic("softdep_deallocate_dependencies: unrecovered I/O error"); 13855} 13856 13857/* 13858 * Function to handle asynchronous write errors in the filesystem. 13859 */ 13860static void 13861softdep_error(func, error) 13862 char *func; 13863 int error; 13864{ 13865 13866 /* XXX should do something better! */ 13867 printf("%s: got error %d while accessing filesystem\n", func, error); 13868} 13869 13870#ifdef DDB 13871 13872static void 13873inodedep_print(struct inodedep *inodedep, int verbose) 13874{ 13875 db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d" 13876 " saveino %p\n", 13877 inodedep, inodedep->id_fs, inodedep->id_state, 13878 (intmax_t)inodedep->id_ino, 13879 (intmax_t)fsbtodb(inodedep->id_fs, 13880 ino_to_fsba(inodedep->id_fs, inodedep->id_ino)), 13881 inodedep->id_nlinkdelta, inodedep->id_savednlink, 13882 inodedep->id_savedino1); 13883 13884 if (verbose == 0) 13885 return; 13886 13887 db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, " 13888 "mkdiradd %p\n", 13889 LIST_FIRST(&inodedep->id_pendinghd), 13890 LIST_FIRST(&inodedep->id_bufwait), 13891 LIST_FIRST(&inodedep->id_inowait), 13892 TAILQ_FIRST(&inodedep->id_inoreflst), 13893 inodedep->id_mkdiradd); 13894 db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n", 13895 TAILQ_FIRST(&inodedep->id_inoupdt), 13896 TAILQ_FIRST(&inodedep->id_newinoupdt), 13897 TAILQ_FIRST(&inodedep->id_extupdt), 13898 TAILQ_FIRST(&inodedep->id_newextupdt)); 13899} 13900 13901DB_SHOW_COMMAND(inodedep, db_show_inodedep) 13902{ 13903 13904 if (have_addr == 0) { 13905 db_printf("Address required\n"); 13906 return; 13907 } 13908 inodedep_print((struct inodedep*)addr, 1); 13909} 13910 13911DB_SHOW_COMMAND(inodedeps, db_show_inodedeps) 13912{ 13913 struct inodedep_hashhead *inodedephd; 13914 struct inodedep *inodedep; 13915 struct ufsmount *ump; 13916 int cnt; 13917 13918 if (have_addr == 0) { 13919 db_printf("Address required\n"); 13920 return; 13921 } 13922 ump = (struct ufsmount *)addr; 13923 for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) { 13924 inodedephd = &ump->inodedep_hashtbl[cnt]; 13925 LIST_FOREACH(inodedep, inodedephd, id_hash) { 13926 inodedep_print(inodedep, 0); 13927 } 13928 } 13929} 13930 13931DB_SHOW_COMMAND(worklist, db_show_worklist) 13932{ 13933 struct worklist *wk; 13934 13935 if (have_addr == 0) { 13936 db_printf("Address required\n"); 13937 return; 13938 } 13939 wk = (struct worklist *)addr; 13940 printf("worklist: %p type %s state 0x%X\n", 13941 wk, TYPENAME(wk->wk_type), wk->wk_state); 13942} 13943 13944DB_SHOW_COMMAND(workhead, db_show_workhead) 13945{ 13946 struct workhead *wkhd; 13947 struct worklist *wk; 13948 int i; 13949 13950 if (have_addr == 0) { 13951 db_printf("Address required\n"); 13952 return; 13953 } 13954 wkhd = (struct workhead *)addr; 13955 wk = LIST_FIRST(wkhd); 13956 for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list)) 13957 db_printf("worklist: %p type %s state 0x%X", 13958 wk, TYPENAME(wk->wk_type), wk->wk_state); 13959 if (i == 100) 13960 
db_printf("workhead overflow");
	db_printf("\n");
}


DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
{
    struct mkdirlist *mkdirlisthd;
    struct jaddref *jaddref;
    struct diradd *diradd;
    struct mkdir *mkdir;

    if (have_addr == 0) {
        db_printf("Address required\n");
        return;
    }
    mkdirlisthd = (struct mkdirlist *)addr;
    LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
        diradd = mkdir->md_diradd;
        db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
            mkdir, mkdir->md_state, diradd, diradd->da_state);
        if ((jaddref = mkdir->md_jaddref) != NULL)
            db_printf(" jaddref %p jaddref state 0x%X",
                jaddref, jaddref->ja_state);
        db_printf("\n");
    }
}

/* exported to ffs_vfsops.c */
extern void db_print_ffs(struct ufsmount *ump);
void
db_print_ffs(struct ufsmount *ump)
{
    db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
        ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
        ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
        ump->softdep_deps, ump->softdep_req);
}

#endif /* DDB */

#endif /* SOFTUPDATES */