ffs_softdep.c revision 270157
1/*-
2 * Copyright 1998, 2000 Marshall Kirk McKusick.
3 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
4 * All rights reserved.
5 *
6 * The soft updates code is derived from the appendix of a University
7 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
8 * "Soft Updates: A Solution to the Metadata Update Problem in File
9 * Systems", CSE-TR-254-95, August 1995).
10 *
11 * Further information about soft updates can be obtained from:
12 *
13 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
14 *	1614 Oxford Street		mckusick@mckusick.com
15 *	Berkeley, CA 94709-1608		+1-510-843-9542
16 *	USA
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 *
22 * 1. Redistributions of source code must retain the above copyright
23 *    notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 *    notice, this list of conditions and the following disclaimer in the
26 *    documentation and/or other materials provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
40 */
41
42#include <sys/cdefs.h>
43__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_softdep.c 270157 2014-08-18 22:53:48Z mckusick $");
44
45#include "opt_ffs.h"
46#include "opt_quota.h"
47#include "opt_ddb.h"
48
49/*
50 * For now we want the safety net that the DEBUG flag provides.
51 */
52#ifndef DEBUG
53#define DEBUG
54#endif
55
56#include <sys/param.h>
57#include <sys/kernel.h>
58#include <sys/systm.h>
59#include <sys/bio.h>
60#include <sys/buf.h>
61#include <sys/kdb.h>
62#include <sys/kthread.h>
63#include <sys/ktr.h>
64#include <sys/limits.h>
65#include <sys/lock.h>
66#include <sys/malloc.h>
67#include <sys/mount.h>
68#include <sys/mutex.h>
69#include <sys/namei.h>
70#include <sys/priv.h>
71#include <sys/proc.h>
72#include <sys/rwlock.h>
73#include <sys/stat.h>
74#include <sys/sysctl.h>
75#include <sys/syslog.h>
76#include <sys/vnode.h>
77#include <sys/conf.h>
78
79#include <ufs/ufs/dir.h>
80#include <ufs/ufs/extattr.h>
81#include <ufs/ufs/quota.h>
82#include <ufs/ufs/inode.h>
83#include <ufs/ufs/ufsmount.h>
84#include <ufs/ffs/fs.h>
85#include <ufs/ffs/softdep.h>
86#include <ufs/ffs/ffs_extern.h>
87#include <ufs/ufs/ufs_extern.h>
88
89#include <vm/vm.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_object.h>
92
93#include <geom/geom.h>
94
95#include <ddb/ddb.h>
96
97#define	KTR_SUJ	0	/* Define to KTR_SPARE. */
98
99#ifndef SOFTUPDATES
100
101int
102softdep_flushfiles(oldmnt, flags, td)
103	struct mount *oldmnt;
104	int flags;
105	struct thread *td;
106{
107
108	panic("softdep_flushfiles called");
109}
110
111int
112softdep_mount(devvp, mp, fs, cred)
113	struct vnode *devvp;
114	struct mount *mp;
115	struct fs *fs;
116	struct ucred *cred;
117{
118
119	return (0);
120}
121
122void
123softdep_initialize()
124{
125
126	return;
127}
128
129void
130softdep_uninitialize()
131{
132
133	return;
134}
135
136void
137softdep_unmount(mp)
138	struct mount *mp;
139{
140
141	panic("softdep_unmount called");
142}
143
144void
145softdep_setup_sbupdate(ump, fs, bp)
146	struct ufsmount *ump;
147	struct fs *fs;
148	struct buf *bp;
149{
150
151	panic("softdep_setup_sbupdate called");
152}
153
154void
155softdep_setup_inomapdep(bp, ip, newinum, mode)
156	struct buf *bp;
157	struct inode *ip;
158	ino_t newinum;
159	int mode;
160{
161
162	panic("softdep_setup_inomapdep called");
163}
164
165void
166softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
167	struct buf *bp;
168	struct mount *mp;
169	ufs2_daddr_t newblkno;
170	int frags;
171	int oldfrags;
172{
173
174	panic("softdep_setup_blkmapdep called");
175}
176
177void
178softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
179	struct inode *ip;
180	ufs_lbn_t lbn;
181	ufs2_daddr_t newblkno;
182	ufs2_daddr_t oldblkno;
183	long newsize;
184	long oldsize;
185	struct buf *bp;
186{
187
188	panic("softdep_setup_allocdirect called");
189}
190
191void
192softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
193	struct inode *ip;
194	ufs_lbn_t lbn;
195	ufs2_daddr_t newblkno;
196	ufs2_daddr_t oldblkno;
197	long newsize;
198	long oldsize;
199	struct buf *bp;
200{
201
202	panic("softdep_setup_allocext called");
203}
204
205void
206softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
207	struct inode *ip;
208	ufs_lbn_t lbn;
209	struct buf *bp;
210	int ptrno;
211	ufs2_daddr_t newblkno;
212	ufs2_daddr_t oldblkno;
213	struct buf *nbp;
214{
215
216	panic("softdep_setup_allocindir_page called");
217}
218
219void
220softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
221	struct buf *nbp;
222	struct inode *ip;
223	struct buf *bp;
224	int ptrno;
225	ufs2_daddr_t newblkno;
226{
227
228	panic("softdep_setup_allocindir_meta called");
229}
230
231void
232softdep_journal_freeblocks(ip, cred, length, flags)
233	struct inode *ip;
234	struct ucred *cred;
235	off_t length;
236	int flags;
237{
238
239	panic("softdep_journal_freeblocks called");
240}
241
242void
243softdep_journal_fsync(ip)
244	struct inode *ip;
245{
246
247	panic("softdep_journal_fsync called");
248}
249
250void
251softdep_setup_freeblocks(ip, length, flags)
252	struct inode *ip;
253	off_t length;
254	int flags;
255{
256
257	panic("softdep_setup_freeblocks called");
258}
259
260void
261softdep_freefile(pvp, ino, mode)
262		struct vnode *pvp;
263		ino_t ino;
264		int mode;
265{
266
267	panic("softdep_freefile called");
268}
269
270int
271softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
272	struct buf *bp;
273	struct inode *dp;
274	off_t diroffset;
275	ino_t newinum;
276	struct buf *newdirbp;
277	int isnewblk;
278{
279
280	panic("softdep_setup_directory_add called");
281}
282
283void
284softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
285	struct buf *bp;
286	struct inode *dp;
287	caddr_t base;
288	caddr_t oldloc;
289	caddr_t newloc;
290	int entrysize;
291{
292
293	panic("softdep_change_directoryentry_offset called");
294}
295
296void
297softdep_setup_remove(bp, dp, ip, isrmdir)
298	struct buf *bp;
299	struct inode *dp;
300	struct inode *ip;
301	int isrmdir;
302{
303
304	panic("softdep_setup_remove called");
305}
306
307void
308softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
309	struct buf *bp;
310	struct inode *dp;
311	struct inode *ip;
312	ino_t newinum;
313	int isrmdir;
314{
315
316	panic("softdep_setup_directory_change called");
317}
318
319void
320softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
321	struct mount *mp;
322	struct buf *bp;
323	ufs2_daddr_t blkno;
324	int frags;
325	struct workhead *wkhd;
326{
327
328	panic("%s called", __FUNCTION__);
329}
330
331void
332softdep_setup_inofree(mp, bp, ino, wkhd)
333	struct mount *mp;
334	struct buf *bp;
335	ino_t ino;
336	struct workhead *wkhd;
337{
338
339	panic("%s called", __FUNCTION__);
340}
341
342void
343softdep_setup_unlink(dp, ip)
344	struct inode *dp;
345	struct inode *ip;
346{
347
348	panic("%s called", __FUNCTION__);
349}
350
351void
352softdep_setup_link(dp, ip)
353	struct inode *dp;
354	struct inode *ip;
355{
356
357	panic("%s called", __FUNCTION__);
358}
359
360void
361softdep_revert_link(dp, ip)
362	struct inode *dp;
363	struct inode *ip;
364{
365
366	panic("%s called", __FUNCTION__);
367}
368
369void
370softdep_setup_rmdir(dp, ip)
371	struct inode *dp;
372	struct inode *ip;
373{
374
375	panic("%s called", __FUNCTION__);
376}
377
378void
379softdep_revert_rmdir(dp, ip)
380	struct inode *dp;
381	struct inode *ip;
382{
383
384	panic("%s called", __FUNCTION__);
385}
386
387void
388softdep_setup_create(dp, ip)
389	struct inode *dp;
390	struct inode *ip;
391{
392
393	panic("%s called", __FUNCTION__);
394}
395
396void
397softdep_revert_create(dp, ip)
398	struct inode *dp;
399	struct inode *ip;
400{
401
402	panic("%s called", __FUNCTION__);
403}
404
405void
406softdep_setup_mkdir(dp, ip)
407	struct inode *dp;
408	struct inode *ip;
409{
410
411	panic("%s called", __FUNCTION__);
412}
413
414void
415softdep_revert_mkdir(dp, ip)
416	struct inode *dp;
417	struct inode *ip;
418{
419
420	panic("%s called", __FUNCTION__);
421}
422
423void
424softdep_setup_dotdot_link(dp, ip)
425	struct inode *dp;
426	struct inode *ip;
427{
428
429	panic("%s called", __FUNCTION__);
430}
431
432int
433softdep_prealloc(vp, waitok)
434	struct vnode *vp;
435	int waitok;
436{
437
438	panic("%s called", __FUNCTION__);
439}
440
441int
442softdep_journal_lookup(mp, vpp)
443	struct mount *mp;
444	struct vnode **vpp;
445{
446
447	return (ENOENT);
448}
449
450void
451softdep_change_linkcnt(ip)
452	struct inode *ip;
453{
454
455	panic("softdep_change_linkcnt called");
456}
457
458void
459softdep_load_inodeblock(ip)
460	struct inode *ip;
461{
462
463	panic("softdep_load_inodeblock called");
464}
465
466void
467softdep_update_inodeblock(ip, bp, waitfor)
468	struct inode *ip;
469	struct buf *bp;
470	int waitfor;
471{
472
473	panic("softdep_update_inodeblock called");
474}
475
476int
477softdep_fsync(vp)
478	struct vnode *vp;	/* the "in_core" copy of the inode */
479{
480
481	return (0);
482}
483
484void
485softdep_fsync_mountdev(vp)
486	struct vnode *vp;
487{
488
489	return;
490}
491
492int
493softdep_flushworklist(oldmnt, countp, td)
494	struct mount *oldmnt;
495	int *countp;
496	struct thread *td;
497{
498
499	*countp = 0;
500	return (0);
501}
502
503int
504softdep_sync_metadata(struct vnode *vp)
505{
506
507	panic("softdep_sync_metadata called");
508}
509
510int
511softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
512{
513
514	panic("softdep_sync_buf called");
515}
516
517int
518softdep_slowdown(vp)
519	struct vnode *vp;
520{
521
522	panic("softdep_slowdown called");
523}
524
525int
526softdep_request_cleanup(fs, vp, cred, resource)
527	struct fs *fs;
528	struct vnode *vp;
529	struct ucred *cred;
530	int resource;
531{
532
533	return (0);
534}
535
536int
537softdep_check_suspend(struct mount *mp,
538		      struct vnode *devvp,
539		      int softdep_depcnt,
540		      int softdep_accdepcnt,
541		      int secondary_writes,
542		      int secondary_accwrites)
543{
544	struct bufobj *bo;
545	int error;
546
547	(void) softdep_depcnt,
548	(void) softdep_accdepcnt;
549
550	bo = &devvp->v_bufobj;
551	ASSERT_BO_WLOCKED(bo);
552
553	MNT_ILOCK(mp);
554	while (mp->mnt_secondary_writes != 0) {
555		BO_UNLOCK(bo);
556		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
557		    (PUSER - 1) | PDROP, "secwr", 0);
558		BO_LOCK(bo);
559		MNT_ILOCK(mp);
560	}
561
562	/*
563	 * Reasons for needing more work before suspend:
564	 * - Dirty buffers on devvp.
565	 * - Secondary writes occurred after start of vnode sync loop
566	 */
567	error = 0;
568	if (bo->bo_numoutput > 0 ||
569	    bo->bo_dirty.bv_cnt > 0 ||
570	    secondary_writes != 0 ||
571	    mp->mnt_secondary_writes != 0 ||
572	    secondary_accwrites != mp->mnt_secondary_accwrites)
573		error = EAGAIN;
574	BO_UNLOCK(bo);
575	return (error);
576}
577
578void
579softdep_get_depcounts(struct mount *mp,
580		      int *softdepactivep,
581		      int *softdepactiveaccp)
582{
583	(void) mp;
584	*softdepactivep = 0;
585	*softdepactiveaccp = 0;
586}
587
588void
589softdep_buf_append(bp, wkhd)
590	struct buf *bp;
591	struct workhead *wkhd;
592{
593
594	panic("softdep_buf_appendwork called");
595}
596
597void
598softdep_inode_append(ip, cred, wkhd)
599	struct inode *ip;
600	struct ucred *cred;
601	struct workhead *wkhd;
602{
603
604	panic("softdep_inode_appendwork called");
605}
606
607void
608softdep_freework(wkhd)
609	struct workhead *wkhd;
610{
611
612	panic("softdep_freework called");
613}
614
615#else
616
617FEATURE(softupdates, "FFS soft-updates support");
618
619static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
620    "soft updates stats");
621static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
622    "total dependencies allocated");
623static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
624    "high use dependencies allocated");
625static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
626    "current dependencies allocated");
627static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
628    "current dependencies written");
629
630unsigned long dep_current[D_LAST + 1];
631unsigned long dep_highuse[D_LAST + 1];
632unsigned long dep_total[D_LAST + 1];
633unsigned long dep_write[D_LAST + 1];
634
635#define	SOFTDEP_TYPE(type, str, long)					\
636    static MALLOC_DEFINE(M_ ## type, #str, long);			\
637    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
638	&dep_total[D_ ## type], 0, "");					\
639    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, 	\
640	&dep_current[D_ ## type], 0, "");				\
641    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, 	\
642	&dep_highuse[D_ ## type], 0, "");				\
643    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, 	\
644	&dep_write[D_ ## type], 0, "");
645
646SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
647SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
648SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
649    "Block or frag allocated from cyl group map");
650SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
651SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
652SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
653SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
654SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
655SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
656SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
657SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
658SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
659SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
660SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
661SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
662SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
663SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
664SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
665SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
666SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
667SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
668SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
669SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
670SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
671SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
672SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
673SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");
674
675static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");
676
677static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
678static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
679static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data");
680
681#define M_SOFTDEP_FLAGS	(M_WAITOK)
682
683/*
684 * translate from workitem type to memory type
685 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
686 */
687static struct malloc_type *memtype[] = {
688	M_PAGEDEP,
689	M_INODEDEP,
690	M_BMSAFEMAP,
691	M_NEWBLK,
692	M_ALLOCDIRECT,
693	M_INDIRDEP,
694	M_ALLOCINDIR,
695	M_FREEFRAG,
696	M_FREEBLKS,
697	M_FREEFILE,
698	M_DIRADD,
699	M_MKDIR,
700	M_DIRREM,
701	M_NEWDIRBLK,
702	M_FREEWORK,
703	M_FREEDEP,
704	M_JADDREF,
705	M_JREMREF,
706	M_JMVREF,
707	M_JNEWBLK,
708	M_JFREEBLK,
709	M_JFREEFRAG,
710	M_JSEG,
711	M_JSEGDEP,
712	M_SBDEP,
713	M_JTRUNC,
714	M_JFSYNC,
715	M_SENTINEL
716};
717
718#define DtoM(type) (memtype[type])
719
720/*
721 * Names of malloc types.
722 */
723#define TYPENAME(type)  \
724	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
725/*
726 * End system adaptation definitions.
727 */
728
729#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
730#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)
731
732/*
733 * Internal function prototypes.
734 */
735static	void check_clear_deps(struct mount *);
736static	void softdep_error(char *, int);
737static	int softdep_process_worklist(struct mount *, int);
738static	int softdep_waitidle(struct mount *);
739static	void drain_output(struct vnode *);
740static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
741static	void clear_remove(struct mount *);
742static	void clear_inodedeps(struct mount *);
743static	void unlinked_inodedep(struct mount *, struct inodedep *);
744static	void clear_unlinked_inodedep(struct inodedep *);
745static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
746static	int flush_pagedep_deps(struct vnode *, struct mount *,
747	    struct diraddhd *);
748static	int free_pagedep(struct pagedep *);
749static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
750static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
751static	int flush_deplist(struct allocdirectlst *, int, int *);
752static	int sync_cgs(struct mount *, int);
753static	int handle_written_filepage(struct pagedep *, struct buf *);
754static	int handle_written_sbdep(struct sbdep *, struct buf *);
755static	void initiate_write_sbdep(struct sbdep *);
756static	void diradd_inode_written(struct diradd *, struct inodedep *);
757static	int handle_written_indirdep(struct indirdep *, struct buf *,
758	    struct buf**);
759static	int handle_written_inodeblock(struct inodedep *, struct buf *);
760static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
761	    uint8_t *);
762static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
763static	void handle_written_jaddref(struct jaddref *);
764static	void handle_written_jremref(struct jremref *);
765static	void handle_written_jseg(struct jseg *, struct buf *);
766static	void handle_written_jnewblk(struct jnewblk *);
767static	void handle_written_jblkdep(struct jblkdep *);
768static	void handle_written_jfreefrag(struct jfreefrag *);
769static	void complete_jseg(struct jseg *);
770static	void complete_jsegs(struct jseg *);
771static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
772static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
773static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
774static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
775static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
776static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
777static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
778static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
779static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
780static	inline void inoref_write(struct inoref *, struct jseg *,
781	    struct jrefrec *);
782static	void handle_allocdirect_partdone(struct allocdirect *,
783	    struct workhead *);
784static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
785	    struct workhead *);
786static	void indirdep_complete(struct indirdep *);
787static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
788static	void indirblk_insert(struct freework *);
789static	void indirblk_remove(struct freework *);
790static	void handle_allocindir_partdone(struct allocindir *);
791static	void initiate_write_filepage(struct pagedep *, struct buf *);
792static	void initiate_write_indirdep(struct indirdep*, struct buf *);
793static	void handle_written_mkdir(struct mkdir *, int);
794static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
795	    uint8_t *);
796static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
797static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
798static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
799static	void handle_workitem_freefile(struct freefile *);
800static	int handle_workitem_remove(struct dirrem *, int);
801static	struct dirrem *newdirrem(struct buf *, struct inode *,
802	    struct inode *, int, struct dirrem **);
803static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
804	    struct buf *);
805static	void cancel_indirdep(struct indirdep *, struct buf *,
806	    struct freeblks *);
807static	void free_indirdep(struct indirdep *);
808static	void free_diradd(struct diradd *, struct workhead *);
809static	void merge_diradd(struct inodedep *, struct diradd *);
810static	void complete_diradd(struct diradd *);
811static	struct diradd *diradd_lookup(struct pagedep *, int);
812static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
813	    struct jremref *);
814static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
815	    struct jremref *);
816static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
817	    struct jremref *, struct jremref *);
818static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
819	    struct jremref *);
820static	void cancel_allocindir(struct allocindir *, struct buf *bp,
821	    struct freeblks *, int);
822static	int setup_trunc_indir(struct freeblks *, struct inode *,
823	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
824static	void complete_trunc_indir(struct freework *);
825static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
826	    int);
827static	void complete_mkdir(struct mkdir *);
828static	void free_newdirblk(struct newdirblk *);
829static	void free_jremref(struct jremref *);
830static	void free_jaddref(struct jaddref *);
831static	void free_jsegdep(struct jsegdep *);
832static	void free_jsegs(struct jblocks *);
833static	void rele_jseg(struct jseg *);
834static	void free_jseg(struct jseg *, struct jblocks *);
835static	void free_jnewblk(struct jnewblk *);
836static	void free_jblkdep(struct jblkdep *);
837static	void free_jfreefrag(struct jfreefrag *);
838static	void free_freedep(struct freedep *);
839static	void journal_jremref(struct dirrem *, struct jremref *,
840	    struct inodedep *);
841static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
842static	int cancel_jaddref(struct jaddref *, struct inodedep *,
843	    struct workhead *);
844static	void cancel_jfreefrag(struct jfreefrag *);
845static	inline void setup_freedirect(struct freeblks *, struct inode *,
846	    int, int);
847static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
848static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
849	    ufs_lbn_t, int);
850static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
851static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
852static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
853static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
854static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
855static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
856	    int, int);
857static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
858static 	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
859static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
860static	void newblk_freefrag(struct newblk*);
861static	void free_newblk(struct newblk *);
862static	void cancel_allocdirect(struct allocdirectlst *,
863	    struct allocdirect *, struct freeblks *);
864static	int check_inode_unwritten(struct inodedep *);
865static	int free_inodedep(struct inodedep *);
866static	void freework_freeblock(struct freework *);
867static	void freework_enqueue(struct freework *);
868static	int handle_workitem_freeblocks(struct freeblks *, int);
869static	int handle_complete_freeblocks(struct freeblks *, int);
870static	void handle_workitem_indirblk(struct freework *);
871static	void handle_written_freework(struct freework *);
872static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
873static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
874	    struct workhead *);
875static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
876	    struct inodedep *, struct allocindir *, ufs_lbn_t);
877static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
878	    ufs2_daddr_t, ufs_lbn_t);
879static	void handle_workitem_freefrag(struct freefrag *);
880static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
881	    ufs_lbn_t);
882static	void allocdirect_merge(struct allocdirectlst *,
883	    struct allocdirect *, struct allocdirect *);
884static	struct freefrag *allocindir_merge(struct allocindir *,
885	    struct allocindir *);
886static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
887	    struct bmsafemap **);
888static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
889	    int cg, struct bmsafemap *);
890static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
891	    struct newblk **);
892static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
893static	int inodedep_find(struct inodedep_hashhead *, ino_t,
894	    struct inodedep **);
895static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
896static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
897	    int, struct pagedep **);
898static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
899	    struct pagedep **);
900static	void pause_timer(void *);
901static	int request_cleanup(struct mount *, int);
902static	int process_worklist_item(struct mount *, int, int);
903static	void process_removes(struct vnode *);
904static	void process_truncates(struct vnode *);
905static	void jwork_move(struct workhead *, struct workhead *);
906static	void jwork_insert(struct workhead *, struct jsegdep *);
907static	void add_to_worklist(struct worklist *, int);
908static	void wake_worklist(struct worklist *);
909static	void wait_worklist(struct worklist *, char *);
910static	void remove_from_worklist(struct worklist *);
911static	void softdep_flush(void *);
912static	void softdep_flushjournal(struct mount *);
913static	int softdep_speedup(struct ufsmount *);
914static	void worklist_speedup(struct mount *);
915static	int journal_mount(struct mount *, struct fs *, struct ucred *);
916static	void journal_unmount(struct ufsmount *);
917static	int journal_space(struct ufsmount *, int);
918static	void journal_suspend(struct ufsmount *);
919static	int journal_unsuspend(struct ufsmount *ump);
920static	void softdep_prelink(struct vnode *, struct vnode *);
921static	void add_to_journal(struct worklist *);
922static	void remove_from_journal(struct worklist *);
923static	void softdep_process_journal(struct mount *, struct worklist *, int);
924static	struct jremref *newjremref(struct dirrem *, struct inode *,
925	    struct inode *ip, off_t, nlink_t);
926static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
927	    uint16_t);
928static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
929	    uint16_t);
930static	inline struct jsegdep *inoref_jseg(struct inoref *);
931static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
932static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
933	    ufs2_daddr_t, int);
934static	void adjust_newfreework(struct freeblks *, int);
935static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
936static	void move_newblock_dep(struct jaddref *, struct inodedep *);
937static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
938static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
939	    ufs2_daddr_t, long, ufs_lbn_t);
940static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
941	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
942static	int jwait(struct worklist *, int);
943static	struct inodedep *inodedep_lookup_ip(struct inode *);
944static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
945static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
946static	void handle_jwork(struct workhead *);
947static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
948	    struct mkdir **);
949static	struct jblocks *jblocks_create(void);
950static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
951static	void jblocks_free(struct jblocks *, struct mount *, int);
952static	void jblocks_destroy(struct jblocks *);
953static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);
954
955/*
956 * Exported softdep operations.
957 */
958static	void softdep_disk_io_initiation(struct buf *);
959static	void softdep_disk_write_complete(struct buf *);
960static	void softdep_deallocate_dependencies(struct buf *);
961static	int softdep_count_dependencies(struct buf *bp, int);
962
963/*
964 * Global lock over all of soft updates.
965 */
966static struct mtx lk;
967MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF);
968
969#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
970#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
971#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)
972
973/*
974 * Per-filesystem soft-updates locking.
975 */
976#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
977#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
978#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
979#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
980#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
981				    RA_WLOCKED)
982
983#define	BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
984#define	BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)
985
986/*
987 * Worklist queue management.
988 * These routines require that the lock be held.
989 */
990#ifndef /* NOT */ DEBUG
991#define WORKLIST_INSERT(head, item) do {	\
992	(item)->wk_state |= ONWORKLIST;		\
993	LIST_INSERT_HEAD(head, item, wk_list);	\
994} while (0)
995#define WORKLIST_REMOVE(item) do {		\
996	(item)->wk_state &= ~ONWORKLIST;	\
997	LIST_REMOVE(item, wk_list);		\
998} while (0)
999#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
1000#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE
1001
1002#else /* DEBUG */
1003static	void worklist_insert(struct workhead *, struct worklist *, int);
1004static	void worklist_remove(struct worklist *, int);
1005
1006#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
1007#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
1008#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
1009#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)
1010
1011static void
1012worklist_insert(head, item, locked)
1013	struct workhead *head;
1014	struct worklist *item;
1015	int locked;
1016{
1017
1018	if (locked)
1019		LOCK_OWNED(VFSTOUFS(item->wk_mp));
1020	if (item->wk_state & ONWORKLIST)
1021		panic("worklist_insert: %p %s(0x%X) already on list",
1022		    item, TYPENAME(item->wk_type), item->wk_state);
1023	item->wk_state |= ONWORKLIST;
1024	LIST_INSERT_HEAD(head, item, wk_list);
1025}
1026
1027static void
1028worklist_remove(item, locked)
1029	struct worklist *item;
1030	int locked;
1031{
1032
1033	if (locked)
1034		LOCK_OWNED(VFSTOUFS(item->wk_mp));
1035	if ((item->wk_state & ONWORKLIST) == 0)
1036		panic("worklist_remove: %p %s(0x%X) not on list",
1037		    item, TYPENAME(item->wk_type), item->wk_state);
1038	item->wk_state &= ~ONWORKLIST;
1039	LIST_REMOVE(item, wk_list);
1040}
1041#endif /* DEBUG */
1042
1043/*
1044 * Merge two jsegdeps keeping only the oldest one as newer references
1045 * can't be discarded until after older references.
1046 */
1047static inline struct jsegdep *
1048jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
1049{
1050	struct jsegdep *swp;
1051
1052	if (two == NULL)
1053		return (one);
1054
1055	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
1056		swp = one;
1057		one = two;
1058		two = swp;
1059	}
1060	WORKLIST_REMOVE(&two->jd_list);
1061	free_jsegdep(two);
1062
1063	return (one);
1064}
1065
1066/*
1067 * If two freedeps are compatible free one to reduce list size.
1068 */
1069static inline struct freedep *
1070freedep_merge(struct freedep *one, struct freedep *two)
1071{
1072	if (two == NULL)
1073		return (one);
1074
1075	if (one->fd_freework == two->fd_freework) {
1076		WORKLIST_REMOVE(&two->fd_list);
1077		free_freedep(two);
1078	}
1079	return (one);
1080}
1081
1082/*
1083 * Move journal work from one list to another.  Duplicate freedeps and
1084 * jsegdeps are coalesced to keep the lists as small as possible.
1085 */
1086static void
1087jwork_move(dst, src)
1088	struct workhead *dst;
1089	struct workhead *src;
1090{
1091	struct freedep *freedep;
1092	struct jsegdep *jsegdep;
1093	struct worklist *wkn;
1094	struct worklist *wk;
1095
1096	KASSERT(dst != src,
1097	    ("jwork_move: dst == src"));
1098	freedep = NULL;
1099	jsegdep = NULL;
1100	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
1101		if (wk->wk_type == D_JSEGDEP)
1102			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1103		if (wk->wk_type == D_FREEDEP)
1104			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1105	}
1106
1107	while ((wk = LIST_FIRST(src)) != NULL) {
1108		WORKLIST_REMOVE(wk);
1109		WORKLIST_INSERT(dst, wk);
1110		if (wk->wk_type == D_JSEGDEP) {
1111			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1112			continue;
1113		}
1114		if (wk->wk_type == D_FREEDEP)
1115			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1116	}
1117}
1118
1119static void
1120jwork_insert(dst, jsegdep)
1121	struct workhead *dst;
1122	struct jsegdep *jsegdep;
1123{
1124	struct jsegdep *jsegdepn;
1125	struct worklist *wk;
1126
1127	LIST_FOREACH(wk, dst, wk_list)
1128		if (wk->wk_type == D_JSEGDEP)
1129			break;
1130	if (wk == NULL) {
1131		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1132		return;
1133	}
1134	jsegdepn = WK_JSEGDEP(wk);
1135	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
1136		WORKLIST_REMOVE(wk);
1137		free_jsegdep(jsegdepn);
1138		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1139	} else
1140		free_jsegdep(jsegdep);
1141}
1142
1143/*
1144 * Routines for tracking and managing workitems.
1145 */
1146static	void workitem_free(struct worklist *, int);
1147static	void workitem_alloc(struct worklist *, int, struct mount *);
1148static	void workitem_reassign(struct worklist *, int);
1149
1150#define	WORKITEM_FREE(item, type) \
1151	workitem_free((struct worklist *)(item), (type))
1152#define	WORKITEM_REASSIGN(item, type) \
1153	workitem_reassign((struct worklist *)(item), (type))
1154
1155static void
1156workitem_free(item, type)
1157	struct worklist *item;
1158	int type;
1159{
1160	struct ufsmount *ump;
1161
1162#ifdef DEBUG
1163	if (item->wk_state & ONWORKLIST)
1164		panic("workitem_free: %s(0x%X) still on list",
1165		    TYPENAME(item->wk_type), item->wk_state);
1166	if (item->wk_type != type && type != D_NEWBLK)
1167		panic("workitem_free: type mismatch %s != %s",
1168		    TYPENAME(item->wk_type), TYPENAME(type));
1169#endif
1170	if (item->wk_state & IOWAITING)
1171		wakeup(item);
1172	ump = VFSTOUFS(item->wk_mp);
1173	LOCK_OWNED(ump);
1174	KASSERT(ump->softdep_deps > 0,
1175	    ("workitem_free: %s: softdep_deps going negative",
1176	    ump->um_fs->fs_fsmnt));
1177	if (--ump->softdep_deps == 0 && ump->softdep_req)
1178		wakeup(&ump->softdep_deps);
1179	KASSERT(dep_current[item->wk_type] > 0,
1180	    ("workitem_free: %s: dep_current[%s] going negative",
1181	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1182	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1183	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
1184	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1185	atomic_subtract_long(&dep_current[item->wk_type], 1);
1186	ump->softdep_curdeps[item->wk_type] -= 1;
1187	free(item, DtoM(type));
1188}
1189
1190static void
1191workitem_alloc(item, type, mp)
1192	struct worklist *item;
1193	int type;
1194	struct mount *mp;
1195{
1196	struct ufsmount *ump;
1197
1198	item->wk_type = type;
1199	item->wk_mp = mp;
1200	item->wk_state = 0;
1201
1202	ump = VFSTOUFS(mp);
1203	ACQUIRE_GBLLOCK(&lk);
1204	dep_current[type]++;
1205	if (dep_current[type] > dep_highuse[type])
1206		dep_highuse[type] = dep_current[type];
1207	dep_total[type]++;
1208	FREE_GBLLOCK(&lk);
1209	ACQUIRE_LOCK(ump);
1210	ump->softdep_curdeps[type] += 1;
1211	ump->softdep_deps++;
1212	ump->softdep_accdeps++;
1213	FREE_LOCK(ump);
1214}
1215
1216static void
1217workitem_reassign(item, newtype)
1218	struct worklist *item;
1219	int newtype;
1220{
1221	struct ufsmount *ump;
1222
1223	ump = VFSTOUFS(item->wk_mp);
1224	LOCK_OWNED(ump);
1225	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1226	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
1227	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1228	ump->softdep_curdeps[item->wk_type] -= 1;
1229	ump->softdep_curdeps[newtype] += 1;
1230	KASSERT(dep_current[item->wk_type] > 0,
1231	    ("workitem_reassign: %s: dep_current[%s] going negative",
1232	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1233	ACQUIRE_GBLLOCK(&lk);
1234	dep_current[newtype]++;
1235	dep_current[item->wk_type]--;
1236	if (dep_current[newtype] > dep_highuse[newtype])
1237		dep_highuse[newtype] = dep_current[newtype];
1238	dep_total[newtype]++;
1239	FREE_GBLLOCK(&lk);
1240	item->wk_type = newtype;
1241}
1242
1243/*
1244 * Workitem queue management
1245 */
1246static int max_softdeps;	/* maximum number of structs before slowdown */
1247static int tickdelay = 2;	/* number of ticks to pause during slowdown */
1248static int proc_waiting;	/* tracks whether we have a timeout posted */
1249static int *stat_countp;	/* statistic to count in proc_waiting timeout */
1250static struct callout softdep_callout;
1251static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
1252static int req_clear_remove;	/* syncer process flush some freeblks */
1253static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
1254
1255/*
1256 * runtime statistics
1257 */
1258static int stat_flush_threads;	/* number of softdep flushing threads */
1259static int stat_worklist_push;	/* number of worklist cleanups */
1260static int stat_blk_limit_push;	/* number of times block limit neared */
1261static int stat_ino_limit_push;	/* number of times inode limit neared */
1262static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
1263static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
1264static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
1265static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
1266static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
1267static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
1268static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
1269static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
1270static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
1271static int stat_journal_min;	/* Times hit journal min threshold */
1272static int stat_journal_low;	/* Times hit journal low threshold */
1273static int stat_journal_wait;	/* Times blocked in jwait(). */
1274static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
1275static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
1276static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
1277static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
1278static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
1279static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
1280static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
1281static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
1282static int stat_cleanup_failures; /* Number of cleanup requests that failed */
1283static int stat_emptyjblocks; /* Number of potentially empty journal blocks */
1284
1285SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
1286    &max_softdeps, 0, "");
1287SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
1288    &tickdelay, 0, "");
1289SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
1290    &stat_flush_threads, 0, "");
1291SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
1292    &stat_worklist_push, 0,"");
1293SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
1294    &stat_blk_limit_push, 0,"");
1295SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
1296    &stat_ino_limit_push, 0,"");
1297SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
1298    &stat_blk_limit_hit, 0, "");
1299SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
1300    &stat_ino_limit_hit, 0, "");
1301SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
1302    &stat_sync_limit_hit, 0, "");
1303SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
1304    &stat_indir_blk_ptrs, 0, "");
1305SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
1306    &stat_inode_bitmap, 0, "");
1307SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
1308    &stat_direct_blk_ptrs, 0, "");
1309SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
1310    &stat_dir_entry, 0, "");
1311SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
1312    &stat_jaddref, 0, "");
1313SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
1314    &stat_jnewblk, 0, "");
1315SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
1316    &stat_journal_low, 0, "");
1317SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
1318    &stat_journal_min, 0, "");
1319SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
1320    &stat_journal_wait, 0, "");
1321SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
1322    &stat_jwait_filepage, 0, "");
1323SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
1324    &stat_jwait_freeblks, 0, "");
1325SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
1326    &stat_jwait_inode, 0, "");
1327SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
1328    &stat_jwait_newblk, 0, "");
1329SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
1330    &stat_cleanup_blkrequests, 0, "");
1331SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
1332    &stat_cleanup_inorequests, 0, "");
1333SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
1334    &stat_cleanup_high_delay, 0, "");
1335SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
1336    &stat_cleanup_retries, 0, "");
1337SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
1338    &stat_cleanup_failures, 0, "");
1339SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
1340    &softdep_flushcache, 0, "");
1341SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
1342    &stat_emptyjblocks, 0, "");
1343
1344SYSCTL_DECL(_vfs_ffs);
1345
1346/* Whether to recompute the summary at mount time */
1347static int compute_summary_at_mount = 0;
1348SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
1349	   &compute_summary_at_mount, 0, "Recompute summary at mount");
1350static int print_threads = 0;
1351SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
1352    &print_threads, 0, "Notify flusher thread start/stop");
1353
1354/* List of all filesystems mounted with soft updates */
1355static TAILQ_HEAD(, mount_softdeps) softdepmounts;
1356
1357/*
1358 * This function cleans the worklist for a filesystem.
1359 * Each filesystem running with soft dependencies gets its own
1360 * thread to run in this function. The thread is started up in
1361 * softdep_mount and shutdown in softdep_unmount. They show up
1362 * as part of the kernel "bufdaemon" process whose process
1363 * entry is available in bufdaemonproc.
1364 */
1365static int searchfailed;
1366extern struct proc *bufdaemonproc;
1367static void
1368softdep_flush(addr)
1369	void *addr;
1370{
1371	struct mount *mp;
1372	struct thread *td;
1373	struct ufsmount *ump;
1374
1375	td = curthread;
1376	td->td_pflags |= TDP_NORUNNINGBUF;
1377	mp = (struct mount *)addr;
1378	ump = VFSTOUFS(mp);
1379	atomic_add_int(&stat_flush_threads, 1);
1380	if (print_threads) {
1381		if (stat_flush_threads == 1)
1382			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
1383			    bufdaemonproc->p_pid);
1384		printf("Start thread %s\n", td->td_name);
1385	}
1386	for (;;) {
1387		while (softdep_process_worklist(mp, 0) > 0 ||
1388		    (MOUNTEDSUJ(mp) &&
1389		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
1390			kthread_suspend_check();
1391		ACQUIRE_LOCK(ump);
1392		if ((ump->softdep_flags & FLUSH_CLEANUP) == 0)
1393			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
1394			    "sdflush", hz / 2);
1395		ump->softdep_flags &= ~FLUSH_CLEANUP;
1396		/*
1397		 * Check to see if we are done and need to exit.
1398		 */
1399		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
1400			FREE_LOCK(ump);
1401			continue;
1402		}
1403		ump->softdep_flags &= ~FLUSH_EXIT;
1404		FREE_LOCK(ump);
1405		wakeup(&ump->softdep_flags);
1406		if (print_threads)
1407			printf("Stop thread %s: searchfailed %d, did cleanups %d\n", td->td_name, searchfailed, ump->um_softdep->sd_cleanups);
1408		atomic_subtract_int(&stat_flush_threads, 1);
1409		kthread_exit();
1410		panic("kthread_exit failed\n");
1411	}
1412}
1413
1414static void
1415worklist_speedup(mp)
1416	struct mount *mp;
1417{
1418	struct ufsmount *ump;
1419
1420	ump = VFSTOUFS(mp);
1421	LOCK_OWNED(ump);
1422	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) {
1423		ump->softdep_flags |= FLUSH_CLEANUP;
1424		if (ump->softdep_flushtd->td_wchan == &ump->softdep_flushtd)
1425			wakeup(&ump->softdep_flushtd);
1426	}
1427}
1428
1429static int
1430softdep_speedup(ump)
1431	struct ufsmount *ump;
1432{
1433	struct ufsmount *altump;
1434	struct mount_softdeps *sdp;
1435
1436	LOCK_OWNED(ump);
1437	worklist_speedup(ump->um_mountp);
1438	bd_speedup();
1439	/*
1440	 * If we have global shortages, then we need other
1441	 * filesystems to help with the cleanup. Here we wakeup a
1442	 * flusher thread for a filesystem that is over its fair
1443	 * share of resources.
1444	 */
1445	if (req_clear_inodedeps || req_clear_remove) {
1446		ACQUIRE_GBLLOCK(&lk);
1447		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
1448			if ((altump = sdp->sd_ump) == ump)
1449				continue;
1450			if (((req_clear_inodedeps &&
1451			    altump->softdep_curdeps[D_INODEDEP] >
1452			    max_softdeps / stat_flush_threads) ||
1453			    (req_clear_remove &&
1454			    altump->softdep_curdeps[D_DIRREM] >
1455			    (max_softdeps / 2) / stat_flush_threads)) &&
1456			    TRY_ACQUIRE_LOCK(altump))
1457				break;
1458		}
1459		if (sdp == NULL) {
1460			searchfailed++;
1461			FREE_GBLLOCK(&lk);
1462		} else {
1463			/*
1464			 * Move to the end of the list so we pick a
1465			 * different one on out next try.
1466			 */
1467			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
1468			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
1469			FREE_GBLLOCK(&lk);
1470			if ((altump->softdep_flags &
1471			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0) {
1472				altump->softdep_flags |= FLUSH_CLEANUP;
1473				altump->um_softdep->sd_cleanups++;
1474				if (altump->softdep_flushtd->td_wchan ==
1475				    &altump->softdep_flushtd) {
1476					wakeup(&altump->softdep_flushtd);
1477				}
1478			}
1479			FREE_LOCK(altump);
1480		}
1481	}
1482	return (speedup_syncer());
1483}
1484
1485/*
1486 * Add an item to the end of the work queue.
1487 * This routine requires that the lock be held.
1488 * This is the only routine that adds items to the list.
1489 * The following routine is the only one that removes items
1490 * and does so in order from first to last.
1491 */
1492
1493#define	WK_HEAD		0x0001	/* Add to HEAD. */
1494#define	WK_NODELAY	0x0002	/* Process immediately. */
1495
1496static void
1497add_to_worklist(wk, flags)
1498	struct worklist *wk;
1499	int flags;
1500{
1501	struct ufsmount *ump;
1502
1503	ump = VFSTOUFS(wk->wk_mp);
1504	LOCK_OWNED(ump);
1505	if (wk->wk_state & ONWORKLIST)
1506		panic("add_to_worklist: %s(0x%X) already on list",
1507		    TYPENAME(wk->wk_type), wk->wk_state);
1508	wk->wk_state |= ONWORKLIST;
1509	if (ump->softdep_on_worklist == 0) {
1510		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1511		ump->softdep_worklist_tail = wk;
1512	} else if (flags & WK_HEAD) {
1513		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1514	} else {
1515		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
1516		ump->softdep_worklist_tail = wk;
1517	}
1518	ump->softdep_on_worklist += 1;
1519	if (flags & WK_NODELAY)
1520		worklist_speedup(wk->wk_mp);
1521}
1522
1523/*
1524 * Remove the item to be processed. If we are removing the last
1525 * item on the list, we need to recalculate the tail pointer.
1526 */
1527static void
1528remove_from_worklist(wk)
1529	struct worklist *wk;
1530{
1531	struct ufsmount *ump;
1532
1533	ump = VFSTOUFS(wk->wk_mp);
1534	WORKLIST_REMOVE(wk);
1535	if (ump->softdep_worklist_tail == wk)
1536		ump->softdep_worklist_tail =
1537		    (struct worklist *)wk->wk_list.le_prev;
1538	ump->softdep_on_worklist -= 1;
1539}
1540
1541static void
1542wake_worklist(wk)
1543	struct worklist *wk;
1544{
1545	if (wk->wk_state & IOWAITING) {
1546		wk->wk_state &= ~IOWAITING;
1547		wakeup(wk);
1548	}
1549}
1550
1551static void
1552wait_worklist(wk, wmesg)
1553	struct worklist *wk;
1554	char *wmesg;
1555{
1556	struct ufsmount *ump;
1557
1558	ump = VFSTOUFS(wk->wk_mp);
1559	wk->wk_state |= IOWAITING;
1560	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
1561}
1562
1563/*
1564 * Process that runs once per second to handle items in the background queue.
1565 *
1566 * Note that we ensure that everything is done in the order in which they
1567 * appear in the queue. The code below depends on this property to ensure
1568 * that blocks of a file are freed before the inode itself is freed. This
1569 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
1570 * until all the old ones have been purged from the dependency lists.
1571 */
1572static int
1573softdep_process_worklist(mp, full)
1574	struct mount *mp;
1575	int full;
1576{
1577	int cnt, matchcnt;
1578	struct ufsmount *ump;
1579	long starttime;
1580
1581	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
1582	if (MOUNTEDSOFTDEP(mp) == 0)
1583		return (0);
1584	matchcnt = 0;
1585	ump = VFSTOUFS(mp);
1586	ACQUIRE_LOCK(ump);
1587	starttime = time_second;
1588	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
1589	check_clear_deps(mp);
1590	while (ump->softdep_on_worklist > 0) {
1591		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
1592			break;
1593		else
1594			matchcnt += cnt;
1595		check_clear_deps(mp);
1596		/*
1597		 * We do not generally want to stop for buffer space, but if
1598		 * we are really being a buffer hog, we will stop and wait.
1599		 */
1600		if (should_yield()) {
1601			FREE_LOCK(ump);
1602			kern_yield(PRI_USER);
1603			bwillwrite();
1604			ACQUIRE_LOCK(ump);
1605		}
1606		/*
1607		 * Never allow processing to run for more than one
1608		 * second. This gives the syncer thread the opportunity
1609		 * to pause if appropriate.
1610		 */
1611		if (!full && starttime != time_second)
1612			break;
1613	}
1614	if (full == 0)
1615		journal_unsuspend(ump);
1616	FREE_LOCK(ump);
1617	return (matchcnt);
1618}
1619
1620/*
1621 * Process all removes associated with a vnode if we are running out of
1622 * journal space.  Any other process which attempts to flush these will
1623 * be unable as we have the vnodes locked.
1624 */
1625static void
1626process_removes(vp)
1627	struct vnode *vp;
1628{
1629	struct inodedep *inodedep;
1630	struct dirrem *dirrem;
1631	struct ufsmount *ump;
1632	struct mount *mp;
1633	ino_t inum;
1634
1635	mp = vp->v_mount;
1636	ump = VFSTOUFS(mp);
1637	LOCK_OWNED(ump);
1638	inum = VTOI(vp)->i_number;
1639	for (;;) {
1640top:
1641		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1642			return;
1643		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
1644			/*
1645			 * If another thread is trying to lock this vnode
1646			 * it will fail but we must wait for it to do so
1647			 * before we can proceed.
1648			 */
1649			if (dirrem->dm_state & INPROGRESS) {
1650				wait_worklist(&dirrem->dm_list, "pwrwait");
1651				goto top;
1652			}
1653			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
1654			    (COMPLETE | ONWORKLIST))
1655				break;
1656		}
1657		if (dirrem == NULL)
1658			return;
1659		remove_from_worklist(&dirrem->dm_list);
1660		FREE_LOCK(ump);
1661		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1662			panic("process_removes: suspended filesystem");
1663		handle_workitem_remove(dirrem, 0);
1664		vn_finished_secondary_write(mp);
1665		ACQUIRE_LOCK(ump);
1666	}
1667}
1668
1669/*
1670 * Process all truncations associated with a vnode if we are running out
1671 * of journal space.  This is called when the vnode lock is already held
1672 * and no other process can clear the truncation.  This function returns
1673 * a value greater than zero if it did any work.
1674 */
1675static void
1676process_truncates(vp)
1677	struct vnode *vp;
1678{
1679	struct inodedep *inodedep;
1680	struct freeblks *freeblks;
1681	struct ufsmount *ump;
1682	struct mount *mp;
1683	ino_t inum;
1684	int cgwait;
1685
1686	mp = vp->v_mount;
1687	ump = VFSTOUFS(mp);
1688	LOCK_OWNED(ump);
1689	inum = VTOI(vp)->i_number;
1690	for (;;) {
1691		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1692			return;
1693		cgwait = 0;
1694		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
1695			/* Journal entries not yet written.  */
1696			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
1697				jwait(&LIST_FIRST(
1698				    &freeblks->fb_jblkdephd)->jb_list,
1699				    MNT_WAIT);
1700				break;
1701			}
1702			/* Another thread is executing this item. */
1703			if (freeblks->fb_state & INPROGRESS) {
1704				wait_worklist(&freeblks->fb_list, "ptrwait");
1705				break;
1706			}
1707			/* Freeblks is waiting on a inode write. */
1708			if ((freeblks->fb_state & COMPLETE) == 0) {
1709				FREE_LOCK(ump);
1710				ffs_update(vp, 1);
1711				ACQUIRE_LOCK(ump);
1712				break;
1713			}
1714			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
1715			    (ALLCOMPLETE | ONWORKLIST)) {
1716				remove_from_worklist(&freeblks->fb_list);
1717				freeblks->fb_state |= INPROGRESS;
1718				FREE_LOCK(ump);
1719				if (vn_start_secondary_write(NULL, &mp,
1720				    V_NOWAIT))
1721					panic("process_truncates: "
1722					    "suspended filesystem");
1723				handle_workitem_freeblocks(freeblks, 0);
1724				vn_finished_secondary_write(mp);
1725				ACQUIRE_LOCK(ump);
1726				break;
1727			}
1728			if (freeblks->fb_cgwait)
1729				cgwait++;
1730		}
1731		if (cgwait) {
1732			FREE_LOCK(ump);
1733			sync_cgs(mp, MNT_WAIT);
1734			ffs_sync_snap(mp, MNT_WAIT);
1735			ACQUIRE_LOCK(ump);
1736			continue;
1737		}
1738		if (freeblks == NULL)
1739			break;
1740	}
1741	return;
1742}
1743
1744/*
1745 * Process one item on the worklist.
1746 */
1747static int
1748process_worklist_item(mp, target, flags)
1749	struct mount *mp;
1750	int target;
1751	int flags;
1752{
1753	struct worklist sentinel;
1754	struct worklist *wk;
1755	struct ufsmount *ump;
1756	int matchcnt;
1757	int error;
1758
1759	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
1760	/*
1761	 * If we are being called because of a process doing a
1762	 * copy-on-write, then it is not safe to write as we may
1763	 * recurse into the copy-on-write routine.
1764	 */
1765	if (curthread->td_pflags & TDP_COWINPROGRESS)
1766		return (-1);
1767	PHOLD(curproc);	/* Don't let the stack go away. */
1768	ump = VFSTOUFS(mp);
1769	LOCK_OWNED(ump);
1770	matchcnt = 0;
1771	sentinel.wk_mp = NULL;
1772	sentinel.wk_type = D_SENTINEL;
1773	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
1774	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
1775	    wk = LIST_NEXT(&sentinel, wk_list)) {
1776		if (wk->wk_type == D_SENTINEL) {
1777			LIST_REMOVE(&sentinel, wk_list);
1778			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
1779			continue;
1780		}
1781		if (wk->wk_state & INPROGRESS)
1782			panic("process_worklist_item: %p already in progress.",
1783			    wk);
1784		wk->wk_state |= INPROGRESS;
1785		remove_from_worklist(wk);
1786		FREE_LOCK(ump);
1787		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1788			panic("process_worklist_item: suspended filesystem");
1789		switch (wk->wk_type) {
1790		case D_DIRREM:
1791			/* removal of a directory entry */
1792			error = handle_workitem_remove(WK_DIRREM(wk), flags);
1793			break;
1794
1795		case D_FREEBLKS:
1796			/* releasing blocks and/or fragments from a file */
1797			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
1798			    flags);
1799			break;
1800
1801		case D_FREEFRAG:
1802			/* releasing a fragment when replaced as a file grows */
1803			handle_workitem_freefrag(WK_FREEFRAG(wk));
1804			error = 0;
1805			break;
1806
1807		case D_FREEFILE:
1808			/* releasing an inode when its link count drops to 0 */
1809			handle_workitem_freefile(WK_FREEFILE(wk));
1810			error = 0;
1811			break;
1812
1813		default:
1814			panic("%s_process_worklist: Unknown type %s",
1815			    "softdep", TYPENAME(wk->wk_type));
1816			/* NOTREACHED */
1817		}
1818		vn_finished_secondary_write(mp);
1819		ACQUIRE_LOCK(ump);
1820		if (error == 0) {
1821			if (++matchcnt == target)
1822				break;
1823			continue;
1824		}
1825		/*
1826		 * We have to retry the worklist item later.  Wake up any
1827		 * waiters who may be able to complete it immediately and
1828		 * add the item back to the head so we don't try to execute
1829		 * it again.
1830		 */
1831		wk->wk_state &= ~INPROGRESS;
1832		wake_worklist(wk);
1833		add_to_worklist(wk, WK_HEAD);
1834	}
1835	LIST_REMOVE(&sentinel, wk_list);
1836	/* Sentinal could've become the tail from remove_from_worklist. */
1837	if (ump->softdep_worklist_tail == &sentinel)
1838		ump->softdep_worklist_tail =
1839		    (struct worklist *)sentinel.wk_list.le_prev;
1840	PRELE(curproc);
1841	return (matchcnt);
1842}
1843
1844/*
1845 * Move dependencies from one buffer to another.
1846 */
1847int
1848softdep_move_dependencies(oldbp, newbp)
1849	struct buf *oldbp;
1850	struct buf *newbp;
1851{
1852	struct worklist *wk, *wktail;
1853	struct ufsmount *ump;
1854	int dirty;
1855
1856	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
1857		return (0);
1858	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
1859	    ("softdep_move_dependencies called on non-softdep filesystem"));
1860	dirty = 0;
1861	wktail = NULL;
1862	ump = VFSTOUFS(wk->wk_mp);
1863	ACQUIRE_LOCK(ump);
1864	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
1865		LIST_REMOVE(wk, wk_list);
1866		if (wk->wk_type == D_BMSAFEMAP &&
1867		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
1868			dirty = 1;
1869		if (wktail == 0)
1870			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
1871		else
1872			LIST_INSERT_AFTER(wktail, wk, wk_list);
1873		wktail = wk;
1874	}
1875	FREE_LOCK(ump);
1876
1877	return (dirty);
1878}
1879
1880/*
1881 * Purge the work list of all items associated with a particular mount point.
1882 */
1883int
1884softdep_flushworklist(oldmnt, countp, td)
1885	struct mount *oldmnt;
1886	int *countp;
1887	struct thread *td;
1888{
1889	struct vnode *devvp;
1890	int count, error = 0;
1891	struct ufsmount *ump;
1892
1893	/*
1894	 * Alternately flush the block device associated with the mount
1895	 * point and process any dependencies that the flushing
1896	 * creates. We continue until no more worklist dependencies
1897	 * are found.
1898	 */
1899	*countp = 0;
1900	ump = VFSTOUFS(oldmnt);
1901	devvp = ump->um_devvp;
1902	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
1903		*countp += count;
1904		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1905		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1906		VOP_UNLOCK(devvp, 0);
1907		if (error)
1908			break;
1909	}
1910	return (error);
1911}
1912
1913static int
1914softdep_waitidle(struct mount *mp)
1915{
1916	struct ufsmount *ump;
1917	int error;
1918	int i;
1919
1920	ump = VFSTOUFS(mp);
1921	ACQUIRE_LOCK(ump);
1922	for (i = 0; i < 10 && ump->softdep_deps; i++) {
1923		ump->softdep_req = 1;
1924		if (ump->softdep_on_worklist)
1925			panic("softdep_waitidle: work added after flush.");
1926		msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM, "softdeps", 1);
1927	}
1928	ump->softdep_req = 0;
1929	FREE_LOCK(ump);
1930	error = 0;
1931	if (i == 10) {
1932		error = EBUSY;
1933		printf("softdep_waitidle: Failed to flush worklist for %p\n",
1934		    mp);
1935	}
1936
1937	return (error);
1938}
1939
1940/*
1941 * Flush all vnodes and worklist items associated with a specified mount point.
1942 */
1943int
1944softdep_flushfiles(oldmnt, flags, td)
1945	struct mount *oldmnt;
1946	int flags;
1947	struct thread *td;
1948{
1949#ifdef QUOTA
1950	struct ufsmount *ump;
1951	int i;
1952#endif
1953	int error, early, depcount, loopcnt, retry_flush_count, retry;
1954	int morework;
1955
1956	KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0,
1957	    ("softdep_flushfiles called on non-softdep filesystem"));
1958	loopcnt = 10;
1959	retry_flush_count = 3;
1960retry_flush:
1961	error = 0;
1962
1963	/*
1964	 * Alternately flush the vnodes associated with the mount
1965	 * point and process any dependencies that the flushing
1966	 * creates. In theory, this loop can happen at most twice,
1967	 * but we give it a few extra just to be sure.
1968	 */
1969	for (; loopcnt > 0; loopcnt--) {
1970		/*
1971		 * Do another flush in case any vnodes were brought in
1972		 * as part of the cleanup operations.
1973		 */
1974		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
1975		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
1976		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
1977			break;
1978		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
1979		    depcount == 0)
1980			break;
1981	}
1982	/*
1983	 * If we are unmounting then it is an error to fail. If we
1984	 * are simply trying to downgrade to read-only, then filesystem
1985	 * activity can keep us busy forever, so we just fail with EBUSY.
1986	 */
1987	if (loopcnt == 0) {
1988		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
1989			panic("softdep_flushfiles: looping");
1990		error = EBUSY;
1991	}
1992	if (!error)
1993		error = softdep_waitidle(oldmnt);
1994	if (!error) {
1995		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
1996			retry = 0;
1997			MNT_ILOCK(oldmnt);
1998			KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0,
1999			    ("softdep_flushfiles: !MNTK_NOINSMNTQ"));
2000			morework = oldmnt->mnt_nvnodelistsize > 0;
2001#ifdef QUOTA
2002			ump = VFSTOUFS(oldmnt);
2003			UFS_LOCK(ump);
2004			for (i = 0; i < MAXQUOTAS; i++) {
2005				if (ump->um_quotas[i] != NULLVP)
2006					morework = 1;
2007			}
2008			UFS_UNLOCK(ump);
2009#endif
2010			if (morework) {
2011				if (--retry_flush_count > 0) {
2012					retry = 1;
2013					loopcnt = 3;
2014				} else
2015					error = EBUSY;
2016			}
2017			MNT_IUNLOCK(oldmnt);
2018			if (retry)
2019				goto retry_flush;
2020		}
2021	}
2022	return (error);
2023}
2024
2025/*
2026 * Structure hashing.
2027 *
2028 * There are four types of structures that can be looked up:
2029 *	1) pagedep structures identified by mount point, inode number,
2030 *	   and logical block.
2031 *	2) inodedep structures identified by mount point and inode number.
2032 *	3) newblk structures identified by mount point and
2033 *	   physical block number.
2034 *	4) bmsafemap structures identified by mount point and
2035 *	   cylinder group number.
2036 *
2037 * The "pagedep" and "inodedep" dependency structures are hashed
2038 * separately from the file blocks and inodes to which they correspond.
2039 * This separation helps when the in-memory copy of an inode or
2040 * file block must be replaced. It also obviates the need to access
2041 * an inode or file page when simply updating (or de-allocating)
2042 * dependency structures. Lookup of newblk structures is needed to
2043 * find newly allocated blocks when trying to associate them with
2044 * their allocdirect or allocindir structure.
2045 *
2046 * The lookup routines optionally create and hash a new instance when
2047 * an existing entry is not found. The bmsafemap lookup routine always
2048 * allocates a new structure if an existing one is not found.
2049 */
2050#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
2051#define NODELAY		0x0002	/* cannot do background work */
2052
2053/*
2054 * Structures and routines associated with pagedep caching.
2055 */
2056#define	PAGEDEP_HASH(ump, inum, lbn) \
2057	(&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size])
2058
2059static int
2060pagedep_find(pagedephd, ino, lbn, pagedeppp)
2061	struct pagedep_hashhead *pagedephd;
2062	ino_t ino;
2063	ufs_lbn_t lbn;
2064	struct pagedep **pagedeppp;
2065{
2066	struct pagedep *pagedep;
2067
2068	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2069		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) {
2070			*pagedeppp = pagedep;
2071			return (1);
2072		}
2073	}
2074	*pagedeppp = NULL;
2075	return (0);
2076}
2077/*
2078 * Look up a pagedep. Return 1 if found, 0 otherwise.
2079 * If not found, allocate if DEPALLOC flag is passed.
2080 * Found or allocated entry is returned in pagedeppp.
2081 * This routine must be called with splbio interrupts blocked.
2082 */
2083static int
2084pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp)
2085	struct mount *mp;
2086	struct buf *bp;
2087	ino_t ino;
2088	ufs_lbn_t lbn;
2089	int flags;
2090	struct pagedep **pagedeppp;
2091{
2092	struct pagedep *pagedep;
2093	struct pagedep_hashhead *pagedephd;
2094	struct worklist *wk;
2095	struct ufsmount *ump;
2096	int ret;
2097	int i;
2098
2099	ump = VFSTOUFS(mp);
2100	LOCK_OWNED(ump);
2101	if (bp) {
2102		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2103			if (wk->wk_type == D_PAGEDEP) {
2104				*pagedeppp = WK_PAGEDEP(wk);
2105				return (1);
2106			}
2107		}
2108	}
2109	pagedephd = PAGEDEP_HASH(ump, ino, lbn);
2110	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2111	if (ret) {
2112		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2113			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2114		return (1);
2115	}
2116	if ((flags & DEPALLOC) == 0)
2117		return (0);
2118	FREE_LOCK(ump);
2119	pagedep = malloc(sizeof(struct pagedep),
2120	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2121	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2122	ACQUIRE_LOCK(ump);
2123	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2124	if (*pagedeppp) {
2125		/*
2126		 * This should never happen since we only create pagedeps
2127		 * with the vnode lock held.  Could be an assert.
2128		 */
2129		WORKITEM_FREE(pagedep, D_PAGEDEP);
2130		return (ret);
2131	}
2132	pagedep->pd_ino = ino;
2133	pagedep->pd_lbn = lbn;
2134	LIST_INIT(&pagedep->pd_dirremhd);
2135	LIST_INIT(&pagedep->pd_pendinghd);
2136	for (i = 0; i < DAHASHSZ; i++)
2137		LIST_INIT(&pagedep->pd_diraddhd[i]);
2138	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2139	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2140	*pagedeppp = pagedep;
2141	return (0);
2142}
2143
2144/*
2145 * Structures and routines associated with inodedep caching.
2146 */
2147#define	INODEDEP_HASH(ump, inum) \
2148      (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size])
2149
2150static int
2151inodedep_find(inodedephd, inum, inodedeppp)
2152	struct inodedep_hashhead *inodedephd;
2153	ino_t inum;
2154	struct inodedep **inodedeppp;
2155{
2156	struct inodedep *inodedep;
2157
2158	LIST_FOREACH(inodedep, inodedephd, id_hash)
2159		if (inum == inodedep->id_ino)
2160			break;
2161	if (inodedep) {
2162		*inodedeppp = inodedep;
2163		return (1);
2164	}
2165	*inodedeppp = NULL;
2166
2167	return (0);
2168}
2169/*
2170 * Look up an inodedep. Return 1 if found, 0 if not found.
2171 * If not found, allocate if DEPALLOC flag is passed.
2172 * Found or allocated entry is returned in inodedeppp.
2173 * This routine must be called with splbio interrupts blocked.
2174 */
2175static int
2176inodedep_lookup(mp, inum, flags, inodedeppp)
2177	struct mount *mp;
2178	ino_t inum;
2179	int flags;
2180	struct inodedep **inodedeppp;
2181{
2182	struct inodedep *inodedep;
2183	struct inodedep_hashhead *inodedephd;
2184	struct ufsmount *ump;
2185	struct fs *fs;
2186
2187	ump = VFSTOUFS(mp);
2188	LOCK_OWNED(ump);
2189	fs = ump->um_fs;
2190	inodedephd = INODEDEP_HASH(ump, inum);
2191
2192	if (inodedep_find(inodedephd, inum, inodedeppp))
2193		return (1);
2194	if ((flags & DEPALLOC) == 0)
2195		return (0);
2196	/*
2197	 * If the system is over its limit and our filesystem is
2198	 * responsible for more than our share of that usage and
2199	 * we are not in a rush, request some inodedep cleanup.
2200	 */
2201	while (dep_current[D_INODEDEP] > max_softdeps &&
2202	    (flags & NODELAY) == 0 &&
2203	    ump->softdep_curdeps[D_INODEDEP] >
2204	    max_softdeps / stat_flush_threads)
2205		request_cleanup(mp, FLUSH_INODES);
2206	FREE_LOCK(ump);
2207	inodedep = malloc(sizeof(struct inodedep),
2208		M_INODEDEP, M_SOFTDEP_FLAGS);
2209	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2210	ACQUIRE_LOCK(ump);
2211	if (inodedep_find(inodedephd, inum, inodedeppp)) {
2212		WORKITEM_FREE(inodedep, D_INODEDEP);
2213		return (1);
2214	}
2215	inodedep->id_fs = fs;
2216	inodedep->id_ino = inum;
2217	inodedep->id_state = ALLCOMPLETE;
2218	inodedep->id_nlinkdelta = 0;
2219	inodedep->id_savedino1 = NULL;
2220	inodedep->id_savedsize = -1;
2221	inodedep->id_savedextsize = -1;
2222	inodedep->id_savednlink = -1;
2223	inodedep->id_bmsafemap = NULL;
2224	inodedep->id_mkdiradd = NULL;
2225	LIST_INIT(&inodedep->id_dirremhd);
2226	LIST_INIT(&inodedep->id_pendinghd);
2227	LIST_INIT(&inodedep->id_inowait);
2228	LIST_INIT(&inodedep->id_bufwait);
2229	TAILQ_INIT(&inodedep->id_inoreflst);
2230	TAILQ_INIT(&inodedep->id_inoupdt);
2231	TAILQ_INIT(&inodedep->id_newinoupdt);
2232	TAILQ_INIT(&inodedep->id_extupdt);
2233	TAILQ_INIT(&inodedep->id_newextupdt);
2234	TAILQ_INIT(&inodedep->id_freeblklst);
2235	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2236	*inodedeppp = inodedep;
2237	return (0);
2238}
2239
2240/*
2241 * Structures and routines associated with newblk caching.
2242 */
2243#define	NEWBLK_HASH(ump, inum) \
2244	(&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size])
2245
2246static int
2247newblk_find(newblkhd, newblkno, flags, newblkpp)
2248	struct newblk_hashhead *newblkhd;
2249	ufs2_daddr_t newblkno;
2250	int flags;
2251	struct newblk **newblkpp;
2252{
2253	struct newblk *newblk;
2254
2255	LIST_FOREACH(newblk, newblkhd, nb_hash) {
2256		if (newblkno != newblk->nb_newblkno)
2257			continue;
2258		/*
2259		 * If we're creating a new dependency don't match those that
2260		 * have already been converted to allocdirects.  This is for
2261		 * a frag extend.
2262		 */
2263		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2264			continue;
2265		break;
2266	}
2267	if (newblk) {
2268		*newblkpp = newblk;
2269		return (1);
2270	}
2271	*newblkpp = NULL;
2272	return (0);
2273}
2274
2275/*
2276 * Look up a newblk. Return 1 if found, 0 if not found.
2277 * If not found, allocate if DEPALLOC flag is passed.
2278 * Found or allocated entry is returned in newblkpp.
2279 */
2280static int
2281newblk_lookup(mp, newblkno, flags, newblkpp)
2282	struct mount *mp;
2283	ufs2_daddr_t newblkno;
2284	int flags;
2285	struct newblk **newblkpp;
2286{
2287	struct newblk *newblk;
2288	struct newblk_hashhead *newblkhd;
2289	struct ufsmount *ump;
2290
2291	ump = VFSTOUFS(mp);
2292	LOCK_OWNED(ump);
2293	newblkhd = NEWBLK_HASH(ump, newblkno);
2294	if (newblk_find(newblkhd, newblkno, flags, newblkpp))
2295		return (1);
2296	if ((flags & DEPALLOC) == 0)
2297		return (0);
2298	FREE_LOCK(ump);
2299	newblk = malloc(sizeof(union allblk), M_NEWBLK,
2300	    M_SOFTDEP_FLAGS | M_ZERO);
2301	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2302	ACQUIRE_LOCK(ump);
2303	if (newblk_find(newblkhd, newblkno, flags, newblkpp)) {
2304		WORKITEM_FREE(newblk, D_NEWBLK);
2305		return (1);
2306	}
2307	newblk->nb_freefrag = NULL;
2308	LIST_INIT(&newblk->nb_indirdeps);
2309	LIST_INIT(&newblk->nb_newdirblk);
2310	LIST_INIT(&newblk->nb_jwork);
2311	newblk->nb_state = ATTACHED;
2312	newblk->nb_newblkno = newblkno;
2313	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2314	*newblkpp = newblk;
2315	return (0);
2316}
2317
2318/*
2319 * Structures and routines associated with freed indirect block caching.
2320 */
2321#define	INDIR_HASH(ump, blkno) \
2322	(&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size])
2323
2324/*
2325 * Lookup an indirect block in the indir hash table.  The freework is
2326 * removed and potentially freed.  The caller must do a blocking journal
2327 * write before writing to the blkno.
2328 */
2329static int
2330indirblk_lookup(mp, blkno)
2331	struct mount *mp;
2332	ufs2_daddr_t blkno;
2333{
2334	struct freework *freework;
2335	struct indir_hashhead *wkhd;
2336	struct ufsmount *ump;
2337
2338	ump = VFSTOUFS(mp);
2339	wkhd = INDIR_HASH(ump, blkno);
2340	TAILQ_FOREACH(freework, wkhd, fw_next) {
2341		if (freework->fw_blkno != blkno)
2342			continue;
2343		indirblk_remove(freework);
2344		return (1);
2345	}
2346	return (0);
2347}
2348
2349/*
2350 * Insert an indirect block represented by freework into the indirblk
2351 * hash table so that it may prevent the block from being re-used prior
2352 * to the journal being written.
2353 */
2354static void
2355indirblk_insert(freework)
2356	struct freework *freework;
2357{
2358	struct jblocks *jblocks;
2359	struct jseg *jseg;
2360	struct ufsmount *ump;
2361
2362	ump = VFSTOUFS(freework->fw_list.wk_mp);
2363	jblocks = ump->softdep_jblocks;
2364	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2365	if (jseg == NULL)
2366		return;
2367
2368	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2369	TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework,
2370	    fw_next);
2371	freework->fw_state &= ~DEPCOMPLETE;
2372}
2373
2374static void
2375indirblk_remove(freework)
2376	struct freework *freework;
2377{
2378	struct ufsmount *ump;
2379
2380	ump = VFSTOUFS(freework->fw_list.wk_mp);
2381	LIST_REMOVE(freework, fw_segs);
2382	TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next);
2383	freework->fw_state |= DEPCOMPLETE;
2384	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2385		WORKITEM_FREE(freework, D_FREEWORK);
2386}
2387
2388/*
2389 * Executed during filesystem system initialization before
2390 * mounting any filesystems.
2391 */
2392void
2393softdep_initialize()
2394{
2395
2396	TAILQ_INIT(&softdepmounts);
2397	max_softdeps = desiredvnodes * 4;
2398
2399	/* initialise bioops hack */
2400	bioops.io_start = softdep_disk_io_initiation;
2401	bioops.io_complete = softdep_disk_write_complete;
2402	bioops.io_deallocate = softdep_deallocate_dependencies;
2403	bioops.io_countdeps = softdep_count_dependencies;
2404
2405	/* Initialize the callout with an mtx. */
2406	callout_init_mtx(&softdep_callout, &lk, 0);
2407}
2408
2409/*
2410 * Executed after all filesystems have been unmounted during
2411 * filesystem module unload.
2412 */
2413void
2414softdep_uninitialize()
2415{
2416
2417	/* clear bioops hack */
2418	bioops.io_start = NULL;
2419	bioops.io_complete = NULL;
2420	bioops.io_deallocate = NULL;
2421	bioops.io_countdeps = NULL;
2422
2423	callout_drain(&softdep_callout);
2424}
2425
2426/*
2427 * Called at mount time to notify the dependency code that a
2428 * filesystem wishes to use it.
2429 */
2430int
2431softdep_mount(devvp, mp, fs, cred)
2432	struct vnode *devvp;
2433	struct mount *mp;
2434	struct fs *fs;
2435	struct ucred *cred;
2436{
2437	struct csum_total cstotal;
2438	struct mount_softdeps *sdp;
2439	struct ufsmount *ump;
2440	struct cg *cgp;
2441	struct buf *bp;
2442	int i, error, cyl;
2443
2444	sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA,
2445	    M_WAITOK | M_ZERO);
2446	MNT_ILOCK(mp);
2447	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2448	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2449		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2450			MNTK_SOFTDEP | MNTK_NOASYNC;
2451	}
2452	ump = VFSTOUFS(mp);
2453	ump->um_softdep = sdp;
2454	MNT_IUNLOCK(mp);
2455	rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock");
2456	sdp->sd_ump = ump;
2457	LIST_INIT(&ump->softdep_workitem_pending);
2458	LIST_INIT(&ump->softdep_journal_pending);
2459	TAILQ_INIT(&ump->softdep_unlinked);
2460	LIST_INIT(&ump->softdep_dirtycg);
2461	ump->softdep_worklist_tail = NULL;
2462	ump->softdep_on_worklist = 0;
2463	ump->softdep_deps = 0;
2464	LIST_INIT(&ump->softdep_mkdirlisthd);
2465	ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
2466	    &ump->pagedep_hash_size);
2467	ump->pagedep_nextclean = 0;
2468	ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP,
2469	    &ump->inodedep_hash_size);
2470	ump->inodedep_nextclean = 0;
2471	ump->newblk_hashtbl = hashinit(max_softdeps / 2,  M_NEWBLK,
2472	    &ump->newblk_hash_size);
2473	ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP,
2474	    &ump->bmsafemap_hash_size);
2475	i = 1 << (ffs(desiredvnodes / 10) - 1);
2476	ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead),
2477	    M_FREEWORK, M_WAITOK);
2478	ump->indir_hash_size = i - 1;
2479	for (i = 0; i <= ump->indir_hash_size; i++)
2480		TAILQ_INIT(&ump->indir_hashtbl[i]);
2481	ACQUIRE_GBLLOCK(&lk);
2482	TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
2483	FREE_GBLLOCK(&lk);
2484	if ((fs->fs_flags & FS_SUJ) &&
2485	    (error = journal_mount(mp, fs, cred)) != 0) {
2486		printf("Failed to start journal: %d\n", error);
2487		softdep_unmount(mp);
2488		return (error);
2489	}
2490	/*
2491	 * Start our flushing thread in the bufdaemon process.
2492	 */
2493	kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc,
2494	    &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker",
2495	    mp->mnt_stat.f_mntonname);
2496	/*
2497	 * When doing soft updates, the counters in the
2498	 * superblock may have gotten out of sync. Recomputation
2499	 * can take a long time and can be deferred for background
2500	 * fsck.  However, the old behavior of scanning the cylinder
2501	 * groups and recalculating them at mount time is available
2502	 * by setting vfs.ffs.compute_summary_at_mount to one.
2503	 */
2504	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2505		return (0);
2506	bzero(&cstotal, sizeof cstotal);
2507	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2508		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2509		    fs->fs_cgsize, cred, &bp)) != 0) {
2510			brelse(bp);
2511			softdep_unmount(mp);
2512			return (error);
2513		}
2514		cgp = (struct cg *)bp->b_data;
2515		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2516		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2517		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2518		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2519		fs->fs_cs(fs, cyl) = cgp->cg_cs;
2520		brelse(bp);
2521	}
2522#ifdef DEBUG
2523	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2524		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2525#endif
2526	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2527	return (0);
2528}
2529
2530void
2531softdep_unmount(mp)
2532	struct mount *mp;
2533{
2534	struct ufsmount *ump;
2535#ifdef INVARIANTS
2536	int i;
2537#endif
2538
2539	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
2540	    ("softdep_unmount called on non-softdep filesystem"));
2541	ump = VFSTOUFS(mp);
2542	MNT_ILOCK(mp);
2543	mp->mnt_flag &= ~MNT_SOFTDEP;
2544	if (MOUNTEDSUJ(mp) == 0) {
2545		MNT_IUNLOCK(mp);
2546	} else {
2547		mp->mnt_flag &= ~MNT_SUJ;
2548		MNT_IUNLOCK(mp);
2549		journal_unmount(ump);
2550	}
2551	/*
2552	 * Shut down our flushing thread. Check for NULL is if
2553	 * softdep_mount errors out before the thread has been created.
2554	 */
2555	if (ump->softdep_flushtd != NULL) {
2556		ACQUIRE_LOCK(ump);
2557		ump->softdep_flags |= FLUSH_EXIT;
2558		wakeup(&ump->softdep_flushtd);
2559		msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP,
2560		    "sdwait", 0);
2561		KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0,
2562		    ("Thread shutdown failed"));
2563	}
2564	/*
2565	 * Free up our resources.
2566	 */
2567	ACQUIRE_GBLLOCK(&lk);
2568	TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next);
2569	FREE_GBLLOCK(&lk);
2570	rw_destroy(LOCK_PTR(ump));
2571	hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size);
2572	hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size);
2573	hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size);
2574	hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP,
2575	    ump->bmsafemap_hash_size);
2576	free(ump->indir_hashtbl, M_FREEWORK);
2577#ifdef INVARIANTS
2578	for (i = 0; i <= D_LAST; i++)
2579		KASSERT(ump->softdep_curdeps[i] == 0,
2580		    ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt,
2581		    TYPENAME(i), ump->softdep_curdeps[i]));
2582#endif
2583	free(ump->um_softdep, M_MOUNTDATA);
2584}
2585
2586static struct jblocks *
2587jblocks_create(void)
2588{
2589	struct jblocks *jblocks;
2590
2591	jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2592	TAILQ_INIT(&jblocks->jb_segs);
2593	jblocks->jb_avail = 10;
2594	jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2595	    M_JBLOCKS, M_WAITOK | M_ZERO);
2596
2597	return (jblocks);
2598}
2599
2600static ufs2_daddr_t
2601jblocks_alloc(jblocks, bytes, actual)
2602	struct jblocks *jblocks;
2603	int bytes;
2604	int *actual;
2605{
2606	ufs2_daddr_t daddr;
2607	struct jextent *jext;
2608	int freecnt;
2609	int blocks;
2610
2611	blocks = bytes / DEV_BSIZE;
2612	jext = &jblocks->jb_extent[jblocks->jb_head];
2613	freecnt = jext->je_blocks - jblocks->jb_off;
2614	if (freecnt == 0) {
2615		jblocks->jb_off = 0;
2616		if (++jblocks->jb_head > jblocks->jb_used)
2617			jblocks->jb_head = 0;
2618		jext = &jblocks->jb_extent[jblocks->jb_head];
2619		freecnt = jext->je_blocks;
2620	}
2621	if (freecnt > blocks)
2622		freecnt = blocks;
2623	*actual = freecnt * DEV_BSIZE;
2624	daddr = jext->je_daddr + jblocks->jb_off;
2625	jblocks->jb_off += freecnt;
2626	jblocks->jb_free -= freecnt;
2627
2628	return (daddr);
2629}
2630
2631static void
2632jblocks_free(jblocks, mp, bytes)
2633	struct jblocks *jblocks;
2634	struct mount *mp;
2635	int bytes;
2636{
2637
2638	LOCK_OWNED(VFSTOUFS(mp));
2639	jblocks->jb_free += bytes / DEV_BSIZE;
2640	if (jblocks->jb_suspended)
2641		worklist_speedup(mp);
2642	wakeup(jblocks);
2643}
2644
2645static void
2646jblocks_destroy(jblocks)
2647	struct jblocks *jblocks;
2648{
2649
2650	if (jblocks->jb_extent)
2651		free(jblocks->jb_extent, M_JBLOCKS);
2652	free(jblocks, M_JBLOCKS);
2653}
2654
2655static void
2656jblocks_add(jblocks, daddr, blocks)
2657	struct jblocks *jblocks;
2658	ufs2_daddr_t daddr;
2659	int blocks;
2660{
2661	struct jextent *jext;
2662
2663	jblocks->jb_blocks += blocks;
2664	jblocks->jb_free += blocks;
2665	jext = &jblocks->jb_extent[jblocks->jb_used];
2666	/* Adding the first block. */
2667	if (jext->je_daddr == 0) {
2668		jext->je_daddr = daddr;
2669		jext->je_blocks = blocks;
2670		return;
2671	}
2672	/* Extending the last extent. */
2673	if (jext->je_daddr + jext->je_blocks == daddr) {
2674		jext->je_blocks += blocks;
2675		return;
2676	}
2677	/* Adding a new extent. */
2678	if (++jblocks->jb_used == jblocks->jb_avail) {
2679		jblocks->jb_avail *= 2;
2680		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2681		    M_JBLOCKS, M_WAITOK | M_ZERO);
2682		memcpy(jext, jblocks->jb_extent,
2683		    sizeof(struct jextent) * jblocks->jb_used);
2684		free(jblocks->jb_extent, M_JBLOCKS);
2685		jblocks->jb_extent = jext;
2686	}
2687	jext = &jblocks->jb_extent[jblocks->jb_used];
2688	jext->je_daddr = daddr;
2689	jext->je_blocks = blocks;
2690	return;
2691}
2692
2693int
2694softdep_journal_lookup(mp, vpp)
2695	struct mount *mp;
2696	struct vnode **vpp;
2697{
2698	struct componentname cnp;
2699	struct vnode *dvp;
2700	ino_t sujournal;
2701	int error;
2702
2703	error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp);
2704	if (error)
2705		return (error);
2706	bzero(&cnp, sizeof(cnp));
2707	cnp.cn_nameiop = LOOKUP;
2708	cnp.cn_flags = ISLASTCN;
2709	cnp.cn_thread = curthread;
2710	cnp.cn_cred = curthread->td_ucred;
2711	cnp.cn_pnbuf = SUJ_FILE;
2712	cnp.cn_nameptr = SUJ_FILE;
2713	cnp.cn_namelen = strlen(SUJ_FILE);
2714	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2715	vput(dvp);
2716	if (error != 0)
2717		return (error);
2718	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2719	return (error);
2720}
2721
2722/*
2723 * Open and verify the journal file.
2724 */
2725static int
2726journal_mount(mp, fs, cred)
2727	struct mount *mp;
2728	struct fs *fs;
2729	struct ucred *cred;
2730{
2731	struct jblocks *jblocks;
2732	struct ufsmount *ump;
2733	struct vnode *vp;
2734	struct inode *ip;
2735	ufs2_daddr_t blkno;
2736	int bcount;
2737	int error;
2738	int i;
2739
2740	ump = VFSTOUFS(mp);
2741	ump->softdep_journal_tail = NULL;
2742	ump->softdep_on_journal = 0;
2743	ump->softdep_accdeps = 0;
2744	ump->softdep_req = 0;
2745	ump->softdep_jblocks = NULL;
2746	error = softdep_journal_lookup(mp, &vp);
2747	if (error != 0) {
2748		printf("Failed to find journal.  Use tunefs to create one\n");
2749		return (error);
2750	}
2751	ip = VTOI(vp);
2752	if (ip->i_size < SUJ_MIN) {
2753		error = ENOSPC;
2754		goto out;
2755	}
2756	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
2757	jblocks = jblocks_create();
2758	for (i = 0; i < bcount; i++) {
2759		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2760		if (error)
2761			break;
2762		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2763	}
2764	if (error) {
2765		jblocks_destroy(jblocks);
2766		goto out;
2767	}
2768	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
2769	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2770	ump->softdep_jblocks = jblocks;
2771out:
2772	if (error == 0) {
2773		MNT_ILOCK(mp);
2774		mp->mnt_flag |= MNT_SUJ;
2775		mp->mnt_flag &= ~MNT_SOFTDEP;
2776		MNT_IUNLOCK(mp);
2777		/*
2778		 * Only validate the journal contents if the
2779		 * filesystem is clean, otherwise we write the logs
2780		 * but they'll never be used.  If the filesystem was
2781		 * still dirty when we mounted it the journal is
2782		 * invalid and a new journal can only be valid if it
2783		 * starts from a clean mount.
2784		 */
2785		if (fs->fs_clean) {
2786			DIP_SET(ip, i_modrev, fs->fs_mtime);
2787			ip->i_flags |= IN_MODIFIED;
2788			ffs_update(vp, 1);
2789		}
2790	}
2791	vput(vp);
2792	return (error);
2793}
2794
2795static void
2796journal_unmount(ump)
2797	struct ufsmount *ump;
2798{
2799
2800	if (ump->softdep_jblocks)
2801		jblocks_destroy(ump->softdep_jblocks);
2802	ump->softdep_jblocks = NULL;
2803}
2804
2805/*
2806 * Called when a journal record is ready to be written.  Space is allocated
2807 * and the journal entry is created when the journal is flushed to stable
2808 * store.
2809 */
2810static void
2811add_to_journal(wk)
2812	struct worklist *wk;
2813{
2814	struct ufsmount *ump;
2815
2816	ump = VFSTOUFS(wk->wk_mp);
2817	LOCK_OWNED(ump);
2818	if (wk->wk_state & ONWORKLIST)
2819		panic("add_to_journal: %s(0x%X) already on list",
2820		    TYPENAME(wk->wk_type), wk->wk_state);
2821	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2822	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2823		ump->softdep_jblocks->jb_age = ticks;
2824		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2825	} else
2826		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2827	ump->softdep_journal_tail = wk;
2828	ump->softdep_on_journal += 1;
2829}
2830
2831/*
2832 * Remove an arbitrary item for the journal worklist maintain the tail
2833 * pointer.  This happens when a new operation obviates the need to
2834 * journal an old operation.
2835 */
2836static void
2837remove_from_journal(wk)
2838	struct worklist *wk;
2839{
2840	struct ufsmount *ump;
2841
2842	ump = VFSTOUFS(wk->wk_mp);
2843	LOCK_OWNED(ump);
2844#ifdef SUJ_DEBUG
2845	{
2846		struct worklist *wkn;
2847
2848		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2849			if (wkn == wk)
2850				break;
2851		if (wkn == NULL)
2852			panic("remove_from_journal: %p is not in journal", wk);
2853	}
2854#endif
2855	/*
2856	 * We emulate a TAILQ to save space in most structures which do not
2857	 * require TAILQ semantics.  Here we must update the tail position
2858	 * when removing the tail which is not the final entry. This works
2859	 * only if the worklist linkage are at the beginning of the structure.
2860	 */
2861	if (ump->softdep_journal_tail == wk)
2862		ump->softdep_journal_tail =
2863		    (struct worklist *)wk->wk_list.le_prev;
2864
2865	WORKLIST_REMOVE(wk);
2866	ump->softdep_on_journal -= 1;
2867}
2868
2869/*
2870 * Check for journal space as well as dependency limits so the prelink
2871 * code can throttle both journaled and non-journaled filesystems.
2872 * Threshold is 0 for low and 1 for min.
2873 */
2874static int
2875journal_space(ump, thresh)
2876	struct ufsmount *ump;
2877	int thresh;
2878{
2879	struct jblocks *jblocks;
2880	int limit, avail;
2881
2882	jblocks = ump->softdep_jblocks;
2883	if (jblocks == NULL)
2884		return (1);
2885	/*
2886	 * We use a tighter restriction here to prevent request_cleanup()
2887	 * running in threads from running into locks we currently hold.
2888	 * We have to be over the limit and our filesystem has to be
2889	 * responsible for more than our share of that usage.
2890	 */
2891	limit = (max_softdeps / 10) * 9;
2892	if (dep_current[D_INODEDEP] > limit &&
2893	    ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
2894		return (0);
2895	if (thresh)
2896		thresh = jblocks->jb_min;
2897	else
2898		thresh = jblocks->jb_low;
2899	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
2900	avail = jblocks->jb_free - avail;
2901
2902	return (avail > thresh);
2903}
2904
2905static void
2906journal_suspend(ump)
2907	struct ufsmount *ump;
2908{
2909	struct jblocks *jblocks;
2910	struct mount *mp;
2911
2912	mp = UFSTOVFS(ump);
2913	jblocks = ump->softdep_jblocks;
2914	MNT_ILOCK(mp);
2915	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2916		stat_journal_min++;
2917		mp->mnt_kern_flag |= MNTK_SUSPEND;
2918		mp->mnt_susp_owner = ump->softdep_flushtd;
2919	}
2920	jblocks->jb_suspended = 1;
2921	MNT_IUNLOCK(mp);
2922}
2923
2924static int
2925journal_unsuspend(struct ufsmount *ump)
2926{
2927	struct jblocks *jblocks;
2928	struct mount *mp;
2929
2930	mp = UFSTOVFS(ump);
2931	jblocks = ump->softdep_jblocks;
2932
2933	if (jblocks != NULL && jblocks->jb_suspended &&
2934	    journal_space(ump, jblocks->jb_min)) {
2935		jblocks->jb_suspended = 0;
2936		FREE_LOCK(ump);
2937		mp->mnt_susp_owner = curthread;
2938		vfs_write_resume(mp, 0);
2939		ACQUIRE_LOCK(ump);
2940		return (1);
2941	}
2942	return (0);
2943}
2944
2945/*
2946 * Called before any allocation function to be certain that there is
2947 * sufficient space in the journal prior to creating any new records.
2948 * Since in the case of block allocation we may have multiple locked
2949 * buffers at the time of the actual allocation we can not block
2950 * when the journal records are created.  Doing so would create a deadlock
2951 * if any of these buffers needed to be flushed to reclaim space.  Instead
2952 * we require a sufficiently large amount of available space such that
2953 * each thread in the system could have passed this allocation check and
2954 * still have sufficient free space.  With 20% of a minimum journal size
2955 * of 1MB we have 6553 records available.
2956 */
2957int
2958softdep_prealloc(vp, waitok)
2959	struct vnode *vp;
2960	int waitok;
2961{
2962	struct ufsmount *ump;
2963
2964	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
2965	    ("softdep_prealloc called on non-softdep filesystem"));
2966	/*
2967	 * Nothing to do if we are not running journaled soft updates.
2968	 * If we currently hold the snapshot lock, we must avoid handling
2969	 * other resources that could cause deadlock.
2970	 */
2971	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)))
2972		return (0);
2973	ump = VFSTOUFS(vp->v_mount);
2974	ACQUIRE_LOCK(ump);
2975	if (journal_space(ump, 0)) {
2976		FREE_LOCK(ump);
2977		return (0);
2978	}
2979	stat_journal_low++;
2980	FREE_LOCK(ump);
2981	if (waitok == MNT_NOWAIT)
2982		return (ENOSPC);
2983	/*
2984	 * Attempt to sync this vnode once to flush any journal
2985	 * work attached to it.
2986	 */
2987	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
2988		ffs_syncvnode(vp, waitok, 0);
2989	ACQUIRE_LOCK(ump);
2990	process_removes(vp);
2991	process_truncates(vp);
2992	if (journal_space(ump, 0) == 0) {
2993		softdep_speedup(ump);
2994		if (journal_space(ump, 1) == 0)
2995			journal_suspend(ump);
2996	}
2997	FREE_LOCK(ump);
2998
2999	return (0);
3000}
3001
3002/*
3003 * Before adjusting a link count on a vnode verify that we have sufficient
3004 * journal space.  If not, process operations that depend on the currently
3005 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
3006 * and softdep flush threads can not acquire these locks to reclaim space.
3007 */
3008static void
3009softdep_prelink(dvp, vp)
3010	struct vnode *dvp;
3011	struct vnode *vp;
3012{
3013	struct ufsmount *ump;
3014
3015	ump = VFSTOUFS(dvp->v_mount);
3016	LOCK_OWNED(ump);
3017	/*
3018	 * Nothing to do if we have sufficient journal space.
3019	 * If we currently hold the snapshot lock, we must avoid
3020	 * handling other resources that could cause deadlock.
3021	 */
3022	if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
3023		return;
3024	stat_journal_low++;
3025	FREE_LOCK(ump);
3026	if (vp)
3027		ffs_syncvnode(vp, MNT_NOWAIT, 0);
3028	ffs_syncvnode(dvp, MNT_WAIT, 0);
3029	ACQUIRE_LOCK(ump);
3030	/* Process vp before dvp as it may create .. removes. */
3031	if (vp) {
3032		process_removes(vp);
3033		process_truncates(vp);
3034	}
3035	process_removes(dvp);
3036	process_truncates(dvp);
3037	softdep_speedup(ump);
3038	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3039	if (journal_space(ump, 0) == 0) {
3040		softdep_speedup(ump);
3041		if (journal_space(ump, 1) == 0)
3042			journal_suspend(ump);
3043	}
3044}
3045
3046static void
3047jseg_write(ump, jseg, data)
3048	struct ufsmount *ump;
3049	struct jseg *jseg;
3050	uint8_t *data;
3051{
3052	struct jsegrec *rec;
3053
3054	rec = (struct jsegrec *)data;
3055	rec->jsr_seq = jseg->js_seq;
3056	rec->jsr_oldest = jseg->js_oldseq;
3057	rec->jsr_cnt = jseg->js_cnt;
3058	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
3059	rec->jsr_crc = 0;
3060	rec->jsr_time = ump->um_fs->fs_mtime;
3061}
3062
3063static inline void
3064inoref_write(inoref, jseg, rec)
3065	struct inoref *inoref;
3066	struct jseg *jseg;
3067	struct jrefrec *rec;
3068{
3069
3070	inoref->if_jsegdep->jd_seg = jseg;
3071	rec->jr_ino = inoref->if_ino;
3072	rec->jr_parent = inoref->if_parent;
3073	rec->jr_nlink = inoref->if_nlink;
3074	rec->jr_mode = inoref->if_mode;
3075	rec->jr_diroff = inoref->if_diroff;
3076}
3077
3078static void
3079jaddref_write(jaddref, jseg, data)
3080	struct jaddref *jaddref;
3081	struct jseg *jseg;
3082	uint8_t *data;
3083{
3084	struct jrefrec *rec;
3085
3086	rec = (struct jrefrec *)data;
3087	rec->jr_op = JOP_ADDREF;
3088	inoref_write(&jaddref->ja_ref, jseg, rec);
3089}
3090
3091static void
3092jremref_write(jremref, jseg, data)
3093	struct jremref *jremref;
3094	struct jseg *jseg;
3095	uint8_t *data;
3096{
3097	struct jrefrec *rec;
3098
3099	rec = (struct jrefrec *)data;
3100	rec->jr_op = JOP_REMREF;
3101	inoref_write(&jremref->jr_ref, jseg, rec);
3102}
3103
3104static void
3105jmvref_write(jmvref, jseg, data)
3106	struct jmvref *jmvref;
3107	struct jseg *jseg;
3108	uint8_t *data;
3109{
3110	struct jmvrec *rec;
3111
3112	rec = (struct jmvrec *)data;
3113	rec->jm_op = JOP_MVREF;
3114	rec->jm_ino = jmvref->jm_ino;
3115	rec->jm_parent = jmvref->jm_parent;
3116	rec->jm_oldoff = jmvref->jm_oldoff;
3117	rec->jm_newoff = jmvref->jm_newoff;
3118}
3119
3120static void
3121jnewblk_write(jnewblk, jseg, data)
3122	struct jnewblk *jnewblk;
3123	struct jseg *jseg;
3124	uint8_t *data;
3125{
3126	struct jblkrec *rec;
3127
3128	jnewblk->jn_jsegdep->jd_seg = jseg;
3129	rec = (struct jblkrec *)data;
3130	rec->jb_op = JOP_NEWBLK;
3131	rec->jb_ino = jnewblk->jn_ino;
3132	rec->jb_blkno = jnewblk->jn_blkno;
3133	rec->jb_lbn = jnewblk->jn_lbn;
3134	rec->jb_frags = jnewblk->jn_frags;
3135	rec->jb_oldfrags = jnewblk->jn_oldfrags;
3136}
3137
3138static void
3139jfreeblk_write(jfreeblk, jseg, data)
3140	struct jfreeblk *jfreeblk;
3141	struct jseg *jseg;
3142	uint8_t *data;
3143{
3144	struct jblkrec *rec;
3145
3146	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3147	rec = (struct jblkrec *)data;
3148	rec->jb_op = JOP_FREEBLK;
3149	rec->jb_ino = jfreeblk->jf_ino;
3150	rec->jb_blkno = jfreeblk->jf_blkno;
3151	rec->jb_lbn = jfreeblk->jf_lbn;
3152	rec->jb_frags = jfreeblk->jf_frags;
3153	rec->jb_oldfrags = 0;
3154}
3155
3156static void
3157jfreefrag_write(jfreefrag, jseg, data)
3158	struct jfreefrag *jfreefrag;
3159	struct jseg *jseg;
3160	uint8_t *data;
3161{
3162	struct jblkrec *rec;
3163
3164	jfreefrag->fr_jsegdep->jd_seg = jseg;
3165	rec = (struct jblkrec *)data;
3166	rec->jb_op = JOP_FREEBLK;
3167	rec->jb_ino = jfreefrag->fr_ino;
3168	rec->jb_blkno = jfreefrag->fr_blkno;
3169	rec->jb_lbn = jfreefrag->fr_lbn;
3170	rec->jb_frags = jfreefrag->fr_frags;
3171	rec->jb_oldfrags = 0;
3172}
3173
3174static void
3175jtrunc_write(jtrunc, jseg, data)
3176	struct jtrunc *jtrunc;
3177	struct jseg *jseg;
3178	uint8_t *data;
3179{
3180	struct jtrncrec *rec;
3181
3182	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3183	rec = (struct jtrncrec *)data;
3184	rec->jt_op = JOP_TRUNC;
3185	rec->jt_ino = jtrunc->jt_ino;
3186	rec->jt_size = jtrunc->jt_size;
3187	rec->jt_extsize = jtrunc->jt_extsize;
3188}
3189
3190static void
3191jfsync_write(jfsync, jseg, data)
3192	struct jfsync *jfsync;
3193	struct jseg *jseg;
3194	uint8_t *data;
3195{
3196	struct jtrncrec *rec;
3197
3198	rec = (struct jtrncrec *)data;
3199	rec->jt_op = JOP_SYNC;
3200	rec->jt_ino = jfsync->jfs_ino;
3201	rec->jt_size = jfsync->jfs_size;
3202	rec->jt_extsize = jfsync->jfs_extsize;
3203}
3204
3205static void
3206softdep_flushjournal(mp)
3207	struct mount *mp;
3208{
3209	struct jblocks *jblocks;
3210	struct ufsmount *ump;
3211
3212	if (MOUNTEDSUJ(mp) == 0)
3213		return;
3214	ump = VFSTOUFS(mp);
3215	jblocks = ump->softdep_jblocks;
3216	ACQUIRE_LOCK(ump);
3217	while (ump->softdep_on_journal) {
3218		jblocks->jb_needseg = 1;
3219		softdep_process_journal(mp, NULL, MNT_WAIT);
3220	}
3221	FREE_LOCK(ump);
3222}
3223
3224static void softdep_synchronize_completed(struct bio *);
3225static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3226
3227static void
3228softdep_synchronize_completed(bp)
3229        struct bio *bp;
3230{
3231	struct jseg *oldest;
3232	struct jseg *jseg;
3233	struct ufsmount *ump;
3234
3235	/*
3236	 * caller1 marks the last segment written before we issued the
3237	 * synchronize cache.
3238	 */
3239	jseg = bp->bio_caller1;
3240	if (jseg == NULL) {
3241		g_destroy_bio(bp);
3242		return;
3243	}
3244	ump = VFSTOUFS(jseg->js_list.wk_mp);
3245	ACQUIRE_LOCK(ump);
3246	oldest = NULL;
3247	/*
3248	 * Mark all the journal entries waiting on the synchronize cache
3249	 * as completed so they may continue on.
3250	 */
3251	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3252		jseg->js_state |= COMPLETE;
3253		oldest = jseg;
3254		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3255	}
3256	/*
3257	 * Restart deferred journal entry processing from the oldest
3258	 * completed jseg.
3259	 */
3260	if (oldest)
3261		complete_jsegs(oldest);
3262
3263	FREE_LOCK(ump);
3264	g_destroy_bio(bp);
3265}
3266
3267/*
3268 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3269 * barriers.  The journal must be written prior to any blocks that depend
3270 * on it and the journal can not be released until the blocks have be
3271 * written.  This code handles both barriers simultaneously.
3272 */
3273static void
3274softdep_synchronize(bp, ump, caller1)
3275	struct bio *bp;
3276	struct ufsmount *ump;
3277	void *caller1;
3278{
3279
3280	bp->bio_cmd = BIO_FLUSH;
3281	bp->bio_flags |= BIO_ORDERED;
3282	bp->bio_data = NULL;
3283	bp->bio_offset = ump->um_cp->provider->mediasize;
3284	bp->bio_length = 0;
3285	bp->bio_done = softdep_synchronize_completed;
3286	bp->bio_caller1 = caller1;
3287	g_io_request(bp,
3288	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
3289}
3290
3291/*
3292 * Flush some journal records to disk.
3293 */
3294static void
3295softdep_process_journal(mp, needwk, flags)
3296	struct mount *mp;
3297	struct worklist *needwk;
3298	int flags;
3299{
3300	struct jblocks *jblocks;
3301	struct ufsmount *ump;
3302	struct worklist *wk;
3303	struct jseg *jseg;
3304	struct buf *bp;
3305	struct bio *bio;
3306	uint8_t *data;
3307	struct fs *fs;
3308	int shouldflush;
3309	int segwritten;
3310	int jrecmin;	/* Minimum records per block. */
3311	int jrecmax;	/* Maximum records per block. */
3312	int size;
3313	int cnt;
3314	int off;
3315	int devbsize;
3316
3317	if (MOUNTEDSUJ(mp) == 0)
3318		return;
3319	shouldflush = softdep_flushcache;
3320	bio = NULL;
3321	jseg = NULL;
3322	ump = VFSTOUFS(mp);
3323	LOCK_OWNED(ump);
3324	fs = ump->um_fs;
3325	jblocks = ump->softdep_jblocks;
3326	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3327	/*
3328	 * We write anywhere between a disk block and fs block.  The upper
3329	 * bound is picked to prevent buffer cache fragmentation and limit
3330	 * processing time per I/O.
3331	 */
3332	jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3333	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
3334	segwritten = 0;
3335	for (;;) {
3336		cnt = ump->softdep_on_journal;
3337		/*
3338		 * Criteria for writing a segment:
3339		 * 1) We have a full block.
3340		 * 2) We're called from jwait() and haven't found the
3341		 *    journal item yet.
3342		 * 3) Always write if needseg is set.
3343		 * 4) If we are called from process_worklist and have
3344		 *    not yet written anything we write a partial block
3345		 *    to enforce a 1 second maximum latency on journal
3346		 *    entries.
3347		 */
3348		if (cnt < (jrecmax - 1) && needwk == NULL &&
3349		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3350			break;
3351		cnt++;
3352		/*
3353		 * Verify some free journal space.  softdep_prealloc() should
3354		 * guarantee that we don't run out so this is indicative of
3355		 * a problem with the flow control.  Try to recover
3356		 * gracefully in any event.
3357		 */
3358		while (jblocks->jb_free == 0) {
3359			if (flags != MNT_WAIT)
3360				break;
3361			printf("softdep: Out of journal space!\n");
3362			softdep_speedup(ump);
3363			msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz);
3364		}
3365		FREE_LOCK(ump);
3366		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3367		workitem_alloc(&jseg->js_list, D_JSEG, mp);
3368		LIST_INIT(&jseg->js_entries);
3369		LIST_INIT(&jseg->js_indirs);
3370		jseg->js_state = ATTACHED;
3371		if (shouldflush == 0)
3372			jseg->js_state |= COMPLETE;
3373		else if (bio == NULL)
3374			bio = g_alloc_bio();
3375		jseg->js_jblocks = jblocks;
3376		bp = geteblk(fs->fs_bsize, 0);
3377		ACQUIRE_LOCK(ump);
3378		/*
3379		 * If there was a race while we were allocating the block
3380		 * and jseg the entry we care about was likely written.
3381		 * We bail out in both the WAIT and NOWAIT case and assume
3382		 * the caller will loop if the entry it cares about is
3383		 * not written.
3384		 */
3385		cnt = ump->softdep_on_journal;
3386		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3387			bp->b_flags |= B_INVAL | B_NOCACHE;
3388			WORKITEM_FREE(jseg, D_JSEG);
3389			FREE_LOCK(ump);
3390			brelse(bp);
3391			ACQUIRE_LOCK(ump);
3392			break;
3393		}
3394		/*
3395		 * Calculate the disk block size required for the available
3396		 * records rounded to the min size.
3397		 */
3398		if (cnt == 0)
3399			size = devbsize;
3400		else if (cnt < jrecmax)
3401			size = howmany(cnt, jrecmin) * devbsize;
3402		else
3403			size = fs->fs_bsize;
3404		/*
3405		 * Allocate a disk block for this journal data and account
3406		 * for truncation of the requested size if enough contiguous
3407		 * space was not available.
3408		 */
3409		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3410		bp->b_lblkno = bp->b_blkno;
3411		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3412		bp->b_bcount = size;
3413		bp->b_flags &= ~B_INVAL;
3414		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3415		/*
3416		 * Initialize our jseg with cnt records.  Assign the next
3417		 * sequence number to it and link it in-order.
3418		 */
3419		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3420		jseg->js_buf = bp;
3421		jseg->js_cnt = cnt;
3422		jseg->js_refs = cnt + 1;	/* Self ref. */
3423		jseg->js_size = size;
3424		jseg->js_seq = jblocks->jb_nextseq++;
3425		if (jblocks->jb_oldestseg == NULL)
3426			jblocks->jb_oldestseg = jseg;
3427		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3428		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3429		if (jblocks->jb_writeseg == NULL)
3430			jblocks->jb_writeseg = jseg;
3431		/*
3432		 * Start filling in records from the pending list.
3433		 */
3434		data = bp->b_data;
3435		off = 0;
3436
3437		/*
3438		 * Always put a header on the first block.
3439		 * XXX As with below, there might not be a chance to get
3440		 * into the loop.  Ensure that something valid is written.
3441		 */
3442		jseg_write(ump, jseg, data);
3443		off += JREC_SIZE;
3444		data = bp->b_data + off;
3445
3446		/*
3447		 * XXX Something is wrong here.  There's no work to do,
3448		 * but we need to perform and I/O and allow it to complete
3449		 * anyways.
3450		 */
3451		if (LIST_EMPTY(&ump->softdep_journal_pending))
3452			stat_emptyjblocks++;
3453
3454		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3455		    != NULL) {
3456			if (cnt == 0)
3457				break;
3458			/* Place a segment header on every device block. */
3459			if ((off % devbsize) == 0) {
3460				jseg_write(ump, jseg, data);
3461				off += JREC_SIZE;
3462				data = bp->b_data + off;
3463			}
3464			if (wk == needwk)
3465				needwk = NULL;
3466			remove_from_journal(wk);
3467			wk->wk_state |= INPROGRESS;
3468			WORKLIST_INSERT(&jseg->js_entries, wk);
3469			switch (wk->wk_type) {
3470			case D_JADDREF:
3471				jaddref_write(WK_JADDREF(wk), jseg, data);
3472				break;
3473			case D_JREMREF:
3474				jremref_write(WK_JREMREF(wk), jseg, data);
3475				break;
3476			case D_JMVREF:
3477				jmvref_write(WK_JMVREF(wk), jseg, data);
3478				break;
3479			case D_JNEWBLK:
3480				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3481				break;
3482			case D_JFREEBLK:
3483				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3484				break;
3485			case D_JFREEFRAG:
3486				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3487				break;
3488			case D_JTRUNC:
3489				jtrunc_write(WK_JTRUNC(wk), jseg, data);
3490				break;
3491			case D_JFSYNC:
3492				jfsync_write(WK_JFSYNC(wk), jseg, data);
3493				break;
3494			default:
3495				panic("process_journal: Unknown type %s",
3496				    TYPENAME(wk->wk_type));
3497				/* NOTREACHED */
3498			}
3499			off += JREC_SIZE;
3500			data = bp->b_data + off;
3501			cnt--;
3502		}
3503
3504		/* Clear any remaining space so we don't leak kernel data */
3505		if (size > off)
3506			bzero(data, size - off);
3507
3508		/*
3509		 * Write this one buffer and continue.
3510		 */
3511		segwritten = 1;
3512		jblocks->jb_needseg = 0;
3513		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3514		FREE_LOCK(ump);
3515		pbgetvp(ump->um_devvp, bp);
3516		/*
3517		 * We only do the blocking wait once we find the journal
3518		 * entry we're looking for.
3519		 */
3520		if (needwk == NULL && flags == MNT_WAIT)
3521			bwrite(bp);
3522		else
3523			bawrite(bp);
3524		ACQUIRE_LOCK(ump);
3525	}
3526	/*
3527	 * If we wrote a segment issue a synchronize cache so the journal
3528	 * is reflected on disk before the data is written.  Since reclaiming
3529	 * journal space also requires writing a journal record this
3530	 * process also enforces a barrier before reclamation.
3531	 */
3532	if (segwritten && shouldflush) {
3533		softdep_synchronize(bio, ump,
3534		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
3535	} else if (bio)
3536		g_destroy_bio(bio);
3537	/*
3538	 * If we've suspended the filesystem because we ran out of journal
3539	 * space either try to sync it here to make some progress or
3540	 * unsuspend it if we already have.
3541	 */
3542	if (flags == 0 && jblocks->jb_suspended) {
3543		if (journal_unsuspend(ump))
3544			return;
3545		FREE_LOCK(ump);
3546		VFS_SYNC(mp, MNT_NOWAIT);
3547		ffs_sbupdate(ump, MNT_WAIT, 0);
3548		ACQUIRE_LOCK(ump);
3549	}
3550}
3551
3552/*
3553 * Complete a jseg, allowing all dependencies awaiting journal writes
3554 * to proceed.  Each journal dependency also attaches a jsegdep to dependent
3555 * structures so that the journal segment can be freed to reclaim space.
3556 */
3557static void
3558complete_jseg(jseg)
3559	struct jseg *jseg;
3560{
3561	struct worklist *wk;
3562	struct jmvref *jmvref;
3563	int waiting;
3564#ifdef INVARIANTS
3565	int i = 0;
3566#endif
3567
3568	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3569		WORKLIST_REMOVE(wk);
3570		waiting = wk->wk_state & IOWAITING;
3571		wk->wk_state &= ~(INPROGRESS | IOWAITING);
3572		wk->wk_state |= COMPLETE;
3573		KASSERT(i++ < jseg->js_cnt,
3574		    ("handle_written_jseg: overflow %d >= %d",
3575		    i - 1, jseg->js_cnt));
3576		switch (wk->wk_type) {
3577		case D_JADDREF:
3578			handle_written_jaddref(WK_JADDREF(wk));
3579			break;
3580		case D_JREMREF:
3581			handle_written_jremref(WK_JREMREF(wk));
3582			break;
3583		case D_JMVREF:
3584			rele_jseg(jseg);	/* No jsegdep. */
3585			jmvref = WK_JMVREF(wk);
3586			LIST_REMOVE(jmvref, jm_deps);
3587			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3588				free_pagedep(jmvref->jm_pagedep);
3589			WORKITEM_FREE(jmvref, D_JMVREF);
3590			break;
3591		case D_JNEWBLK:
3592			handle_written_jnewblk(WK_JNEWBLK(wk));
3593			break;
3594		case D_JFREEBLK:
3595			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3596			break;
3597		case D_JTRUNC:
3598			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3599			break;
3600		case D_JFSYNC:
3601			rele_jseg(jseg);	/* No jsegdep. */
3602			WORKITEM_FREE(wk, D_JFSYNC);
3603			break;
3604		case D_JFREEFRAG:
3605			handle_written_jfreefrag(WK_JFREEFRAG(wk));
3606			break;
3607		default:
3608			panic("handle_written_jseg: Unknown type %s",
3609			    TYPENAME(wk->wk_type));
3610			/* NOTREACHED */
3611		}
3612		if (waiting)
3613			wakeup(wk);
3614	}
3615	/* Release the self reference so the structure may be freed. */
3616	rele_jseg(jseg);
3617}
3618
3619/*
3620 * Determine which jsegs are ready for completion processing.  Waits for
3621 * synchronize cache to complete as well as forcing in-order completion
3622 * of journal entries.
3623 */
3624static void
3625complete_jsegs(jseg)
3626	struct jseg *jseg;
3627{
3628	struct jblocks *jblocks;
3629	struct jseg *jsegn;
3630
3631	jblocks = jseg->js_jblocks;
3632	/*
3633	 * Don't allow out of order completions.  If this isn't the first
3634	 * block wait for it to write before we're done.
3635	 */
3636	if (jseg != jblocks->jb_writeseg)
3637		return;
3638	/* Iterate through available jsegs processing their entries. */
3639	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3640		jblocks->jb_oldestwrseq = jseg->js_oldseq;
3641		jsegn = TAILQ_NEXT(jseg, js_next);
3642		complete_jseg(jseg);
3643		jseg = jsegn;
3644	}
3645	jblocks->jb_writeseg = jseg;
3646	/*
3647	 * Attempt to free jsegs now that oldestwrseq may have advanced.
3648	 */
3649	free_jsegs(jblocks);
3650}
3651
3652/*
3653 * Mark a jseg as DEPCOMPLETE and throw away the buffer.  Attempt to handle
3654 * the final completions.
3655 */
3656static void
3657handle_written_jseg(jseg, bp)
3658	struct jseg *jseg;
3659	struct buf *bp;
3660{
3661
3662	if (jseg->js_refs == 0)
3663		panic("handle_written_jseg: No self-reference on %p", jseg);
3664	jseg->js_state |= DEPCOMPLETE;
3665	/*
3666	 * We'll never need this buffer again, set flags so it will be
3667	 * discarded.
3668	 */
3669	bp->b_flags |= B_INVAL | B_NOCACHE;
3670	pbrelvp(bp);
3671	complete_jsegs(jseg);
3672}
3673
3674static inline struct jsegdep *
3675inoref_jseg(inoref)
3676	struct inoref *inoref;
3677{
3678	struct jsegdep *jsegdep;
3679
3680	jsegdep = inoref->if_jsegdep;
3681	inoref->if_jsegdep = NULL;
3682
3683	return (jsegdep);
3684}
3685
3686/*
3687 * Called once a jremref has made it to stable store.  The jremref is marked
3688 * complete and we attempt to free it.  Any pagedeps writes sleeping waiting
3689 * for the jremref to complete will be awoken by free_jremref.
3690 */
3691static void
3692handle_written_jremref(jremref)
3693	struct jremref *jremref;
3694{
3695	struct inodedep *inodedep;
3696	struct jsegdep *jsegdep;
3697	struct dirrem *dirrem;
3698
3699	/* Grab the jsegdep. */
3700	jsegdep = inoref_jseg(&jremref->jr_ref);
3701	/*
3702	 * Remove us from the inoref list.
3703	 */
3704	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
3705	    0, &inodedep) == 0)
3706		panic("handle_written_jremref: Lost inodedep");
3707	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
3708	/*
3709	 * Complete the dirrem.
3710	 */
3711	dirrem = jremref->jr_dirrem;
3712	jremref->jr_dirrem = NULL;
3713	LIST_REMOVE(jremref, jr_deps);
3714	jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
3715	jwork_insert(&dirrem->dm_jwork, jsegdep);
3716	if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
3717	    (dirrem->dm_state & COMPLETE) != 0)
3718		add_to_worklist(&dirrem->dm_list, 0);
3719	free_jremref(jremref);
3720}
3721
3722/*
3723 * Called once a jaddref has made it to stable store.  The dependency is
3724 * marked complete and any dependent structures are added to the inode
3725 * bufwait list to be completed as soon as it is written.  If a bitmap write
3726 * depends on this entry we move the inode into the inodedephd of the
3727 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
3728 */
3729static void
3730handle_written_jaddref(jaddref)
3731	struct jaddref *jaddref;
3732{
3733	struct jsegdep *jsegdep;
3734	struct inodedep *inodedep;
3735	struct diradd *diradd;
3736	struct mkdir *mkdir;
3737
3738	/* Grab the jsegdep. */
3739	jsegdep = inoref_jseg(&jaddref->ja_ref);
3740	mkdir = NULL;
3741	diradd = NULL;
3742	if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
3743	    0, &inodedep) == 0)
3744		panic("handle_written_jaddref: Lost inodedep.");
3745	if (jaddref->ja_diradd == NULL)
3746		panic("handle_written_jaddref: No dependency");
3747	if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
3748		diradd = jaddref->ja_diradd;
3749		WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
3750	} else if (jaddref->ja_state & MKDIR_PARENT) {
3751		mkdir = jaddref->ja_mkdir;
3752		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
3753	} else if (jaddref->ja_state & MKDIR_BODY)
3754		mkdir = jaddref->ja_mkdir;
3755	else
3756		panic("handle_written_jaddref: Unknown dependency %p",
3757		    jaddref->ja_diradd);
3758	jaddref->ja_diradd = NULL;	/* also clears ja_mkdir */
3759	/*
3760	 * Remove us from the inode list.
3761	 */
3762	TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
3763	/*
3764	 * The mkdir may be waiting on the jaddref to clear before freeing.
3765	 */
3766	if (mkdir) {
3767		KASSERT(mkdir->md_list.wk_type == D_MKDIR,
3768		    ("handle_written_jaddref: Incorrect type for mkdir %s",
3769		    TYPENAME(mkdir->md_list.wk_type)));
3770		mkdir->md_jaddref = NULL;
3771		diradd = mkdir->md_diradd;
3772		mkdir->md_state |= DEPCOMPLETE;
3773		complete_mkdir(mkdir);
3774	}
3775	jwork_insert(&diradd->da_jwork, jsegdep);
3776	if (jaddref->ja_state & NEWBLOCK) {
3777		inodedep->id_state |= ONDEPLIST;
3778		LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
3779		    inodedep, id_deps);
3780	}
3781	free_jaddref(jaddref);
3782}
3783
3784/*
3785 * Called once a jnewblk journal is written.  The allocdirect or allocindir
3786 * is placed in the bmsafemap to await notification of a written bitmap.  If
3787 * the operation was canceled we add the segdep to the appropriate
3788 * dependency to free the journal space once the canceling operation
3789 * completes.
3790 */
3791static void
3792handle_written_jnewblk(jnewblk)
3793	struct jnewblk *jnewblk;
3794{
3795	struct bmsafemap *bmsafemap;
3796	struct freefrag *freefrag;
3797	struct freework *freework;
3798	struct jsegdep *jsegdep;
3799	struct newblk *newblk;
3800
3801	/* Grab the jsegdep. */
3802	jsegdep = jnewblk->jn_jsegdep;
3803	jnewblk->jn_jsegdep = NULL;
3804	if (jnewblk->jn_dep == NULL)
3805		panic("handle_written_jnewblk: No dependency for the segdep.");
3806	switch (jnewblk->jn_dep->wk_type) {
3807	case D_NEWBLK:
3808	case D_ALLOCDIRECT:
3809	case D_ALLOCINDIR:
3810		/*
3811		 * Add the written block to the bmsafemap so it can
3812		 * be notified when the bitmap is on disk.
3813		 */
3814		newblk = WK_NEWBLK(jnewblk->jn_dep);
3815		newblk->nb_jnewblk = NULL;
3816		if ((newblk->nb_state & GOINGAWAY) == 0) {
3817			bmsafemap = newblk->nb_bmsafemap;
3818			newblk->nb_state |= ONDEPLIST;
3819			LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
3820			    nb_deps);
3821		}
3822		jwork_insert(&newblk->nb_jwork, jsegdep);
3823		break;
3824	case D_FREEFRAG:
3825		/*
3826		 * A newblock being removed by a freefrag when replaced by
3827		 * frag extension.
3828		 */
3829		freefrag = WK_FREEFRAG(jnewblk->jn_dep);
3830		freefrag->ff_jdep = NULL;
3831		jwork_insert(&freefrag->ff_jwork, jsegdep);
3832		break;
3833	case D_FREEWORK:
3834		/*
3835		 * A direct block was removed by truncate.
3836		 */
3837		freework = WK_FREEWORK(jnewblk->jn_dep);
3838		freework->fw_jnewblk = NULL;
3839		jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
3840		break;
3841	default:
3842		panic("handle_written_jnewblk: Unknown type %d.",
3843		    jnewblk->jn_dep->wk_type);
3844	}
3845	jnewblk->jn_dep = NULL;
3846	free_jnewblk(jnewblk);
3847}
3848
3849/*
3850 * Cancel a jfreefrag that won't be needed, probably due to colliding with
3851 * an in-flight allocation that has not yet been committed.  Divorce us
3852 * from the freefrag and mark it DEPCOMPLETE so that it may be added
3853 * to the worklist.
3854 */
3855static void
3856cancel_jfreefrag(jfreefrag)
3857	struct jfreefrag *jfreefrag;
3858{
3859	struct freefrag *freefrag;
3860
3861	if (jfreefrag->fr_jsegdep) {
3862		free_jsegdep(jfreefrag->fr_jsegdep);
3863		jfreefrag->fr_jsegdep = NULL;
3864	}
3865	freefrag = jfreefrag->fr_freefrag;
3866	jfreefrag->fr_freefrag = NULL;
3867	free_jfreefrag(jfreefrag);
3868	freefrag->ff_state |= DEPCOMPLETE;
3869	CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
3870}
3871
3872/*
3873 * Free a jfreefrag when the parent freefrag is rendered obsolete.
3874 */
3875static void
3876free_jfreefrag(jfreefrag)
3877	struct jfreefrag *jfreefrag;
3878{
3879
3880	if (jfreefrag->fr_state & INPROGRESS)
3881		WORKLIST_REMOVE(&jfreefrag->fr_list);
3882	else if (jfreefrag->fr_state & ONWORKLIST)
3883		remove_from_journal(&jfreefrag->fr_list);
3884	if (jfreefrag->fr_freefrag != NULL)
3885		panic("free_jfreefrag:  Still attached to a freefrag.");
3886	WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
3887}
3888
3889/*
3890 * Called when the journal write for a jfreefrag completes.  The parent
3891 * freefrag is added to the worklist if this completes its dependencies.
3892 */
3893static void
3894handle_written_jfreefrag(jfreefrag)
3895	struct jfreefrag *jfreefrag;
3896{
3897	struct jsegdep *jsegdep;
3898	struct freefrag *freefrag;
3899
3900	/* Grab the jsegdep. */
3901	jsegdep = jfreefrag->fr_jsegdep;
3902	jfreefrag->fr_jsegdep = NULL;
3903	freefrag = jfreefrag->fr_freefrag;
3904	if (freefrag == NULL)
3905		panic("handle_written_jfreefrag: No freefrag.");
3906	freefrag->ff_state |= DEPCOMPLETE;
3907	freefrag->ff_jdep = NULL;
3908	jwork_insert(&freefrag->ff_jwork, jsegdep);
3909	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
3910		add_to_worklist(&freefrag->ff_list, 0);
3911	jfreefrag->fr_freefrag = NULL;
3912	free_jfreefrag(jfreefrag);
3913}
3914
3915/*
3916 * Called when the journal write for a jfreeblk completes.  The jfreeblk
3917 * is removed from the freeblks list of pending journal writes and the
3918 * jsegdep is moved to the freeblks jwork to be completed when all blocks
3919 * have been reclaimed.
3920 */
3921static void
3922handle_written_jblkdep(jblkdep)
3923	struct jblkdep *jblkdep;
3924{
3925	struct freeblks *freeblks;
3926	struct jsegdep *jsegdep;
3927
3928	/* Grab the jsegdep. */
3929	jsegdep = jblkdep->jb_jsegdep;
3930	jblkdep->jb_jsegdep = NULL;
3931	freeblks = jblkdep->jb_freeblks;
3932	LIST_REMOVE(jblkdep, jb_deps);
3933	jwork_insert(&freeblks->fb_jwork, jsegdep);
3934	/*
3935	 * If the freeblks is all journaled, we can add it to the worklist.
3936	 */
3937	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
3938	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
3939		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
3940
3941	free_jblkdep(jblkdep);
3942}
3943
3944static struct jsegdep *
3945newjsegdep(struct worklist *wk)
3946{
3947	struct jsegdep *jsegdep;
3948
3949	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
3950	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
3951	jsegdep->jd_seg = NULL;
3952
3953	return (jsegdep);
3954}
3955
3956static struct jmvref *
3957newjmvref(dp, ino, oldoff, newoff)
3958	struct inode *dp;
3959	ino_t ino;
3960	off_t oldoff;
3961	off_t newoff;
3962{
3963	struct jmvref *jmvref;
3964
3965	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
3966	workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump));
3967	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
3968	jmvref->jm_parent = dp->i_number;
3969	jmvref->jm_ino = ino;
3970	jmvref->jm_oldoff = oldoff;
3971	jmvref->jm_newoff = newoff;
3972
3973	return (jmvref);
3974}
3975
3976/*
3977 * Allocate a new jremref that tracks the removal of ip from dp with the
3978 * directory entry offset of diroff.  Mark the entry as ATTACHED and
3979 * DEPCOMPLETE as we have all the information required for the journal write
3980 * and the directory has already been removed from the buffer.  The caller
3981 * is responsible for linking the jremref into the pagedep and adding it
3982 * to the journal to write.  The MKDIR_PARENT flag is set if we're doing
3983 * a DOTDOT addition so handle_workitem_remove() can properly assign
3984 * the jsegdep when we're done.
3985 */
3986static struct jremref *
3987newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
3988    off_t diroff, nlink_t nlink)
3989{
3990	struct jremref *jremref;
3991
3992	jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
3993	workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
3994	jremref->jr_state = ATTACHED;
3995	newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
3996	   nlink, ip->i_mode);
3997	jremref->jr_dirrem = dirrem;
3998
3999	return (jremref);
4000}
4001
4002static inline void
4003newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
4004    nlink_t nlink, uint16_t mode)
4005{
4006
4007	inoref->if_jsegdep = newjsegdep(&inoref->if_list);
4008	inoref->if_diroff = diroff;
4009	inoref->if_ino = ino;
4010	inoref->if_parent = parent;
4011	inoref->if_nlink = nlink;
4012	inoref->if_mode = mode;
4013}
4014
4015/*
4016 * Allocate a new jaddref to track the addition of ino to dp at diroff.  The
4017 * directory offset may not be known until later.  The caller is responsible
4018 * adding the entry to the journal when this information is available.  nlink
4019 * should be the link count prior to the addition and mode is only required
4020 * to have the correct FMT.
4021 */
4022static struct jaddref *
4023newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
4024    uint16_t mode)
4025{
4026	struct jaddref *jaddref;
4027
4028	jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
4029	workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
4030	jaddref->ja_state = ATTACHED;
4031	jaddref->ja_mkdir = NULL;
4032	newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
4033
4034	return (jaddref);
4035}
4036
4037/*
4038 * Create a new free dependency for a freework.  The caller is responsible
4039 * for adjusting the reference count when it has the lock held.  The freedep
4040 * will track an outstanding bitmap write that will ultimately clear the
4041 * freework to continue.
4042 */
4043static struct freedep *
4044newfreedep(struct freework *freework)
4045{
4046	struct freedep *freedep;
4047
4048	freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
4049	workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
4050	freedep->fd_freework = freework;
4051
4052	return (freedep);
4053}
4054
4055/*
4056 * Free a freedep structure once the buffer it is linked to is written.  If
4057 * this is the last reference to the freework schedule it for completion.
4058 */
4059static void
4060free_freedep(freedep)
4061	struct freedep *freedep;
4062{
4063	struct freework *freework;
4064
4065	freework = freedep->fd_freework;
4066	freework->fw_freeblks->fb_cgwait--;
4067	if (--freework->fw_ref == 0)
4068		freework_enqueue(freework);
4069	WORKITEM_FREE(freedep, D_FREEDEP);
4070}
4071
4072/*
4073 * Allocate a new freework structure that may be a level in an indirect
4074 * when parent is not NULL or a top level block when it is.  The top level
4075 * freework structures are allocated without the per-filesystem lock held
4076 * and before the freeblks is visible outside of softdep_setup_freeblocks().
4077 */
4078static struct freework *
4079newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal)
4080	struct ufsmount *ump;
4081	struct freeblks *freeblks;
4082	struct freework *parent;
4083	ufs_lbn_t lbn;
4084	ufs2_daddr_t nb;
4085	int frags;
4086	int off;
4087	int journal;
4088{
4089	struct freework *freework;
4090
4091	freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
4092	workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
4093	freework->fw_state = ATTACHED;
4094	freework->fw_jnewblk = NULL;
4095	freework->fw_freeblks = freeblks;
4096	freework->fw_parent = parent;
4097	freework->fw_lbn = lbn;
4098	freework->fw_blkno = nb;
4099	freework->fw_frags = frags;
4100	freework->fw_indir = NULL;
4101	freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR)
4102		? 0 : NINDIR(ump->um_fs) + 1;
4103	freework->fw_start = freework->fw_off = off;
4104	if (journal)
4105		newjfreeblk(freeblks, lbn, nb, frags);
4106	if (parent == NULL) {
4107		ACQUIRE_LOCK(ump);
4108		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
4109		freeblks->fb_ref++;
4110		FREE_LOCK(ump);
4111	}
4112
4113	return (freework);
4114}
4115
4116/*
4117 * Eliminate a jfreeblk for a block that does not need journaling.
4118 */
4119static void
4120cancel_jfreeblk(freeblks, blkno)
4121	struct freeblks *freeblks;
4122	ufs2_daddr_t blkno;
4123{
4124	struct jfreeblk *jfreeblk;
4125	struct jblkdep *jblkdep;
4126
4127	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4128		if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4129			continue;
4130		jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4131		if (jfreeblk->jf_blkno == blkno)
4132			break;
4133	}
4134	if (jblkdep == NULL)
4135		return;
4136	CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4137	free_jsegdep(jblkdep->jb_jsegdep);
4138	LIST_REMOVE(jblkdep, jb_deps);
4139	WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4140}
4141
4142/*
4143 * Allocate a new jfreeblk to journal top level block pointer when truncating
4144 * a file.  The caller must add this to the worklist when the per-filesystem
4145 * lock is held.
4146 */
4147static struct jfreeblk *
4148newjfreeblk(freeblks, lbn, blkno, frags)
4149	struct freeblks *freeblks;
4150	ufs_lbn_t lbn;
4151	ufs2_daddr_t blkno;
4152	int frags;
4153{
4154	struct jfreeblk *jfreeblk;
4155
4156	jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4157	workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4158	    freeblks->fb_list.wk_mp);
4159	jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4160	jfreeblk->jf_dep.jb_freeblks = freeblks;
4161	jfreeblk->jf_ino = freeblks->fb_inum;
4162	jfreeblk->jf_lbn = lbn;
4163	jfreeblk->jf_blkno = blkno;
4164	jfreeblk->jf_frags = frags;
4165	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4166
4167	return (jfreeblk);
4168}
4169
4170/*
4171 * The journal is only prepared to handle full-size block numbers, so we
4172 * have to adjust the record to reflect the change to a full-size block.
4173 * For example, suppose we have a block made up of fragments 8-15 and
4174 * want to free its last two fragments. We are given a request that says:
4175 *     FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0
4176 * where frags are the number of fragments to free and oldfrags are the
4177 * number of fragments to keep. To block align it, we have to change it to
4178 * have a valid full-size blkno, so it becomes:
4179 *     FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6
4180 */
4181static void
4182adjust_newfreework(freeblks, frag_offset)
4183	struct freeblks *freeblks;
4184	int frag_offset;
4185{
4186	struct jfreeblk *jfreeblk;
4187
4188	KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL &&
4189	    LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK),
4190	    ("adjust_newfreework: Missing freeblks dependency"));
4191
4192	jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd));
4193	jfreeblk->jf_blkno -= frag_offset;
4194	jfreeblk->jf_frags += frag_offset;
4195}
4196
4197/*
4198 * Allocate a new jtrunc to track a partial truncation.
4199 */
4200static struct jtrunc *
4201newjtrunc(freeblks, size, extsize)
4202	struct freeblks *freeblks;
4203	off_t size;
4204	int extsize;
4205{
4206	struct jtrunc *jtrunc;
4207
4208	jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4209	workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4210	    freeblks->fb_list.wk_mp);
4211	jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4212	jtrunc->jt_dep.jb_freeblks = freeblks;
4213	jtrunc->jt_ino = freeblks->fb_inum;
4214	jtrunc->jt_size = size;
4215	jtrunc->jt_extsize = extsize;
4216	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4217
4218	return (jtrunc);
4219}
4220
4221/*
4222 * If we're canceling a new bitmap we have to search for another ref
4223 * to move into the bmsafemap dep.  This might be better expressed
4224 * with another structure.
4225 */
4226static void
4227move_newblock_dep(jaddref, inodedep)
4228	struct jaddref *jaddref;
4229	struct inodedep *inodedep;
4230{
4231	struct inoref *inoref;
4232	struct jaddref *jaddrefn;
4233
4234	jaddrefn = NULL;
4235	for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4236	    inoref = TAILQ_NEXT(inoref, if_deps)) {
4237		if ((jaddref->ja_state & NEWBLOCK) &&
4238		    inoref->if_list.wk_type == D_JADDREF) {
4239			jaddrefn = (struct jaddref *)inoref;
4240			break;
4241		}
4242	}
4243	if (jaddrefn == NULL)
4244		return;
4245	jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4246	jaddrefn->ja_state |= jaddref->ja_state &
4247	    (ATTACHED | UNDONE | NEWBLOCK);
4248	jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4249	jaddref->ja_state |= ATTACHED;
4250	LIST_REMOVE(jaddref, ja_bmdeps);
4251	LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4252	    ja_bmdeps);
4253}
4254
4255/*
4256 * Cancel a jaddref either before it has been written or while it is being
4257 * written.  This happens when a link is removed before the add reaches
4258 * the disk.  The jaddref dependency is kept linked into the bmsafemap
4259 * and inode to prevent the link count or bitmap from reaching the disk
4260 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4261 * required.
4262 *
4263 * Returns 1 if the canceled addref requires journaling of the remove and
4264 * 0 otherwise.
4265 */
4266static int
4267cancel_jaddref(jaddref, inodedep, wkhd)
4268	struct jaddref *jaddref;
4269	struct inodedep *inodedep;
4270	struct workhead *wkhd;
4271{
4272	struct inoref *inoref;
4273	struct jsegdep *jsegdep;
4274	int needsj;
4275
4276	KASSERT((jaddref->ja_state & COMPLETE) == 0,
4277	    ("cancel_jaddref: Canceling complete jaddref"));
4278	if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4279		needsj = 1;
4280	else
4281		needsj = 0;
4282	if (inodedep == NULL)
4283		if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4284		    0, &inodedep) == 0)
4285			panic("cancel_jaddref: Lost inodedep");
4286	/*
4287	 * We must adjust the nlink of any reference operation that follows
4288	 * us so that it is consistent with the in-memory reference.  This
4289	 * ensures that inode nlink rollbacks always have the correct link.
4290	 */
4291	if (needsj == 0) {
4292		for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4293		    inoref = TAILQ_NEXT(inoref, if_deps)) {
4294			if (inoref->if_state & GOINGAWAY)
4295				break;
4296			inoref->if_nlink--;
4297		}
4298	}
4299	jsegdep = inoref_jseg(&jaddref->ja_ref);
4300	if (jaddref->ja_state & NEWBLOCK)
4301		move_newblock_dep(jaddref, inodedep);
4302	wake_worklist(&jaddref->ja_list);
4303	jaddref->ja_mkdir = NULL;
4304	if (jaddref->ja_state & INPROGRESS) {
4305		jaddref->ja_state &= ~INPROGRESS;
4306		WORKLIST_REMOVE(&jaddref->ja_list);
4307		jwork_insert(wkhd, jsegdep);
4308	} else {
4309		free_jsegdep(jsegdep);
4310		if (jaddref->ja_state & DEPCOMPLETE)
4311			remove_from_journal(&jaddref->ja_list);
4312	}
4313	jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4314	/*
4315	 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4316	 * can arrange for them to be freed with the bitmap.  Otherwise we
4317	 * no longer need this addref attached to the inoreflst and it
4318	 * will incorrectly adjust nlink if we leave it.
4319	 */
4320	if ((jaddref->ja_state & NEWBLOCK) == 0) {
4321		TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4322		    if_deps);
4323		jaddref->ja_state |= COMPLETE;
4324		free_jaddref(jaddref);
4325		return (needsj);
4326	}
4327	/*
4328	 * Leave the head of the list for jsegdeps for fast merging.
4329	 */
4330	if (LIST_FIRST(wkhd) != NULL) {
4331		jaddref->ja_state |= ONWORKLIST;
4332		LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4333	} else
4334		WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4335
4336	return (needsj);
4337}
4338
4339/*
4340 * Attempt to free a jaddref structure when some work completes.  This
4341 * should only succeed once the entry is written and all dependencies have
4342 * been notified.
4343 */
4344static void
4345free_jaddref(jaddref)
4346	struct jaddref *jaddref;
4347{
4348
4349	if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4350		return;
4351	if (jaddref->ja_ref.if_jsegdep)
4352		panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4353		    jaddref, jaddref->ja_state);
4354	if (jaddref->ja_state & NEWBLOCK)
4355		LIST_REMOVE(jaddref, ja_bmdeps);
4356	if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4357		panic("free_jaddref: Bad state %p(0x%X)",
4358		    jaddref, jaddref->ja_state);
4359	if (jaddref->ja_mkdir != NULL)
4360		panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4361	WORKITEM_FREE(jaddref, D_JADDREF);
4362}
4363
4364/*
4365 * Free a jremref structure once it has been written or discarded.
4366 */
4367static void
4368free_jremref(jremref)
4369	struct jremref *jremref;
4370{
4371
4372	if (jremref->jr_ref.if_jsegdep)
4373		free_jsegdep(jremref->jr_ref.if_jsegdep);
4374	if (jremref->jr_state & INPROGRESS)
4375		panic("free_jremref: IO still pending");
4376	WORKITEM_FREE(jremref, D_JREMREF);
4377}
4378
4379/*
4380 * Free a jnewblk structure.
4381 */
4382static void
4383free_jnewblk(jnewblk)
4384	struct jnewblk *jnewblk;
4385{
4386
4387	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4388		return;
4389	LIST_REMOVE(jnewblk, jn_deps);
4390	if (jnewblk->jn_dep != NULL)
4391		panic("free_jnewblk: Dependency still attached.");
4392	WORKITEM_FREE(jnewblk, D_JNEWBLK);
4393}
4394
4395/*
4396 * Cancel a jnewblk which has been been made redundant by frag extension.
4397 */
4398static void
4399cancel_jnewblk(jnewblk, wkhd)
4400	struct jnewblk *jnewblk;
4401	struct workhead *wkhd;
4402{
4403	struct jsegdep *jsegdep;
4404
4405	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4406	jsegdep = jnewblk->jn_jsegdep;
4407	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4408		panic("cancel_jnewblk: Invalid state");
4409	jnewblk->jn_jsegdep  = NULL;
4410	jnewblk->jn_dep = NULL;
4411	jnewblk->jn_state |= GOINGAWAY;
4412	if (jnewblk->jn_state & INPROGRESS) {
4413		jnewblk->jn_state &= ~INPROGRESS;
4414		WORKLIST_REMOVE(&jnewblk->jn_list);
4415		jwork_insert(wkhd, jsegdep);
4416	} else {
4417		free_jsegdep(jsegdep);
4418		remove_from_journal(&jnewblk->jn_list);
4419	}
4420	wake_worklist(&jnewblk->jn_list);
4421	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4422}
4423
4424static void
4425free_jblkdep(jblkdep)
4426	struct jblkdep *jblkdep;
4427{
4428
4429	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4430		WORKITEM_FREE(jblkdep, D_JFREEBLK);
4431	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4432		WORKITEM_FREE(jblkdep, D_JTRUNC);
4433	else
4434		panic("free_jblkdep: Unexpected type %s",
4435		    TYPENAME(jblkdep->jb_list.wk_type));
4436}
4437
4438/*
4439 * Free a single jseg once it is no longer referenced in memory or on
4440 * disk.  Reclaim journal blocks and dependencies waiting for the segment
4441 * to disappear.
4442 */
4443static void
4444free_jseg(jseg, jblocks)
4445	struct jseg *jseg;
4446	struct jblocks *jblocks;
4447{
4448	struct freework *freework;
4449
4450	/*
4451	 * Free freework structures that were lingering to indicate freed
4452	 * indirect blocks that forced journal write ordering on reallocate.
4453	 */
4454	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4455		indirblk_remove(freework);
4456	if (jblocks->jb_oldestseg == jseg)
4457		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4458	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4459	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4460	KASSERT(LIST_EMPTY(&jseg->js_entries),
4461	    ("free_jseg: Freed jseg has valid entries."));
4462	WORKITEM_FREE(jseg, D_JSEG);
4463}
4464
4465/*
4466 * Free all jsegs that meet the criteria for being reclaimed and update
4467 * oldestseg.
4468 */
4469static void
4470free_jsegs(jblocks)
4471	struct jblocks *jblocks;
4472{
4473	struct jseg *jseg;
4474
4475	/*
4476	 * Free only those jsegs which have none allocated before them to
4477	 * preserve the journal space ordering.
4478	 */
4479	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4480		/*
4481		 * Only reclaim space when nothing depends on this journal
4482		 * set and another set has written that it is no longer
4483		 * valid.
4484		 */
4485		if (jseg->js_refs != 0) {
4486			jblocks->jb_oldestseg = jseg;
4487			return;
4488		}
4489		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4490			break;
4491		if (jseg->js_seq > jblocks->jb_oldestwrseq)
4492			break;
4493		/*
4494		 * We can free jsegs that didn't write entries when
4495		 * oldestwrseq == js_seq.
4496		 */
4497		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4498		    jseg->js_cnt != 0)
4499			break;
4500		free_jseg(jseg, jblocks);
4501	}
4502	/*
4503	 * If we exited the loop above we still must discover the
4504	 * oldest valid segment.
4505	 */
4506	if (jseg)
4507		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4508		     jseg = TAILQ_NEXT(jseg, js_next))
4509			if (jseg->js_refs != 0)
4510				break;
4511	jblocks->jb_oldestseg = jseg;
4512	/*
4513	 * The journal has no valid records but some jsegs may still be
4514	 * waiting on oldestwrseq to advance.  We force a small record
4515	 * out to permit these lingering records to be reclaimed.
4516	 */
4517	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4518		jblocks->jb_needseg = 1;
4519}
4520
4521/*
4522 * Release one reference to a jseg and free it if the count reaches 0.  This
4523 * should eventually reclaim journal space as well.
4524 */
4525static void
4526rele_jseg(jseg)
4527	struct jseg *jseg;
4528{
4529
4530	KASSERT(jseg->js_refs > 0,
4531	    ("free_jseg: Invalid refcnt %d", jseg->js_refs));
4532	if (--jseg->js_refs != 0)
4533		return;
4534	free_jsegs(jseg->js_jblocks);
4535}
4536
4537/*
4538 * Release a jsegdep and decrement the jseg count.
4539 */
4540static void
4541free_jsegdep(jsegdep)
4542	struct jsegdep *jsegdep;
4543{
4544
4545	if (jsegdep->jd_seg)
4546		rele_jseg(jsegdep->jd_seg);
4547	WORKITEM_FREE(jsegdep, D_JSEGDEP);
4548}
4549
4550/*
4551 * Wait for a journal item to make it to disk.  Initiate journal processing
4552 * if required.
4553 */
4554static int
4555jwait(wk, waitfor)
4556	struct worklist *wk;
4557	int waitfor;
4558{
4559
4560	LOCK_OWNED(VFSTOUFS(wk->wk_mp));
4561	/*
4562	 * Blocking journal waits cause slow synchronous behavior.  Record
4563	 * stats on the frequency of these blocking operations.
4564	 */
4565	if (waitfor == MNT_WAIT) {
4566		stat_journal_wait++;
4567		switch (wk->wk_type) {
4568		case D_JREMREF:
4569		case D_JMVREF:
4570			stat_jwait_filepage++;
4571			break;
4572		case D_JTRUNC:
4573		case D_JFREEBLK:
4574			stat_jwait_freeblks++;
4575			break;
4576		case D_JNEWBLK:
4577			stat_jwait_newblk++;
4578			break;
4579		case D_JADDREF:
4580			stat_jwait_inode++;
4581			break;
4582		default:
4583			break;
4584		}
4585	}
4586	/*
4587	 * If IO has not started we process the journal.  We can't mark the
4588	 * worklist item as IOWAITING because we drop the lock while
4589	 * processing the journal and the worklist entry may be freed after
4590	 * this point.  The caller may call back in and re-issue the request.
4591	 */
4592	if ((wk->wk_state & INPROGRESS) == 0) {
4593		softdep_process_journal(wk->wk_mp, wk, waitfor);
4594		if (waitfor != MNT_WAIT)
4595			return (EBUSY);
4596		return (0);
4597	}
4598	if (waitfor != MNT_WAIT)
4599		return (EBUSY);
4600	wait_worklist(wk, "jwait");
4601	return (0);
4602}
4603
4604/*
4605 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4606 * appropriate.  This is a convenience function to reduce duplicate code
4607 * for the setup and revert functions below.
4608 */
4609static struct inodedep *
4610inodedep_lookup_ip(ip)
4611	struct inode *ip;
4612{
4613	struct inodedep *inodedep;
4614	int dflags;
4615
4616	KASSERT(ip->i_nlink >= ip->i_effnlink,
4617	    ("inodedep_lookup_ip: bad delta"));
4618	dflags = DEPALLOC;
4619	if (IS_SNAPSHOT(ip))
4620		dflags |= NODELAY;
4621	(void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags,
4622	    &inodedep);
4623	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4624	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4625
4626	return (inodedep);
4627}
4628
4629/*
4630 * Called prior to creating a new inode and linking it to a directory.  The
4631 * jaddref structure must already be allocated by softdep_setup_inomapdep
4632 * and it is discovered here so we can initialize the mode and update
4633 * nlinkdelta.
4634 */
4635void
4636softdep_setup_create(dp, ip)
4637	struct inode *dp;
4638	struct inode *ip;
4639{
4640	struct inodedep *inodedep;
4641	struct jaddref *jaddref;
4642	struct vnode *dvp;
4643
4644	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4645	    ("softdep_setup_create called on non-softdep filesystem"));
4646	KASSERT(ip->i_nlink == 1,
4647	    ("softdep_setup_create: Invalid link count."));
4648	dvp = ITOV(dp);
4649	ACQUIRE_LOCK(dp->i_ump);
4650	inodedep = inodedep_lookup_ip(ip);
4651	if (DOINGSUJ(dvp)) {
4652		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4653		    inoreflst);
4654		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4655		    ("softdep_setup_create: No addref structure present."));
4656	}
4657	softdep_prelink(dvp, NULL);
4658	FREE_LOCK(dp->i_ump);
4659}
4660
4661/*
4662 * Create a jaddref structure to track the addition of a DOTDOT link when
4663 * we are reparenting an inode as part of a rename.  This jaddref will be
4664 * found by softdep_setup_directory_change.  Adjusts nlinkdelta for
4665 * non-journaling softdep.
4666 */
4667void
4668softdep_setup_dotdot_link(dp, ip)
4669	struct inode *dp;
4670	struct inode *ip;
4671{
4672	struct inodedep *inodedep;
4673	struct jaddref *jaddref;
4674	struct vnode *dvp;
4675	struct vnode *vp;
4676
4677	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4678	    ("softdep_setup_dotdot_link called on non-softdep filesystem"));
4679	dvp = ITOV(dp);
4680	vp = ITOV(ip);
4681	jaddref = NULL;
4682	/*
4683	 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4684	 * is used as a normal link would be.
4685	 */
4686	if (DOINGSUJ(dvp))
4687		jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4688		    dp->i_effnlink - 1, dp->i_mode);
4689	ACQUIRE_LOCK(dp->i_ump);
4690	inodedep = inodedep_lookup_ip(dp);
4691	if (jaddref)
4692		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4693		    if_deps);
4694	softdep_prelink(dvp, ITOV(ip));
4695	FREE_LOCK(dp->i_ump);
4696}
4697
4698/*
4699 * Create a jaddref structure to track a new link to an inode.  The directory
4700 * offset is not known until softdep_setup_directory_add or
4701 * softdep_setup_directory_change.  Adjusts nlinkdelta for non-journaling
4702 * softdep.
4703 */
4704void
4705softdep_setup_link(dp, ip)
4706	struct inode *dp;
4707	struct inode *ip;
4708{
4709	struct inodedep *inodedep;
4710	struct jaddref *jaddref;
4711	struct vnode *dvp;
4712
4713	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4714	    ("softdep_setup_link called on non-softdep filesystem"));
4715	dvp = ITOV(dp);
4716	jaddref = NULL;
4717	if (DOINGSUJ(dvp))
4718		jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4719		    ip->i_mode);
4720	ACQUIRE_LOCK(dp->i_ump);
4721	inodedep = inodedep_lookup_ip(ip);
4722	if (jaddref)
4723		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4724		    if_deps);
4725	softdep_prelink(dvp, ITOV(ip));
4726	FREE_LOCK(dp->i_ump);
4727}
4728
4729/*
4730 * Called to create the jaddref structures to track . and .. references as
4731 * well as lookup and further initialize the incomplete jaddref created
4732 * by softdep_setup_inomapdep when the inode was allocated.  Adjusts
4733 * nlinkdelta for non-journaling softdep.
4734 */
4735void
4736softdep_setup_mkdir(dp, ip)
4737	struct inode *dp;
4738	struct inode *ip;
4739{
4740	struct inodedep *inodedep;
4741	struct jaddref *dotdotaddref;
4742	struct jaddref *dotaddref;
4743	struct jaddref *jaddref;
4744	struct vnode *dvp;
4745
4746	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4747	    ("softdep_setup_mkdir called on non-softdep filesystem"));
4748	dvp = ITOV(dp);
4749	dotaddref = dotdotaddref = NULL;
4750	if (DOINGSUJ(dvp)) {
4751		dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
4752		    ip->i_mode);
4753		dotaddref->ja_state |= MKDIR_BODY;
4754		dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4755		    dp->i_effnlink - 1, dp->i_mode);
4756		dotdotaddref->ja_state |= MKDIR_PARENT;
4757	}
4758	ACQUIRE_LOCK(dp->i_ump);
4759	inodedep = inodedep_lookup_ip(ip);
4760	if (DOINGSUJ(dvp)) {
4761		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4762		    inoreflst);
4763		KASSERT(jaddref != NULL,
4764		    ("softdep_setup_mkdir: No addref structure present."));
4765		KASSERT(jaddref->ja_parent == dp->i_number,
4766		    ("softdep_setup_mkdir: bad parent %ju",
4767		    (uintmax_t)jaddref->ja_parent));
4768		TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
4769		    if_deps);
4770	}
4771	inodedep = inodedep_lookup_ip(dp);
4772	if (DOINGSUJ(dvp))
4773		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
4774		    &dotdotaddref->ja_ref, if_deps);
4775	softdep_prelink(ITOV(dp), NULL);
4776	FREE_LOCK(dp->i_ump);
4777}
4778
4779/*
4780 * Called to track nlinkdelta of the inode and parent directories prior to
4781 * unlinking a directory.
4782 */
4783void
4784softdep_setup_rmdir(dp, ip)
4785	struct inode *dp;
4786	struct inode *ip;
4787{
4788	struct vnode *dvp;
4789
4790	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4791	    ("softdep_setup_rmdir called on non-softdep filesystem"));
4792	dvp = ITOV(dp);
4793	ACQUIRE_LOCK(dp->i_ump);
4794	(void) inodedep_lookup_ip(ip);
4795	(void) inodedep_lookup_ip(dp);
4796	softdep_prelink(dvp, ITOV(ip));
4797	FREE_LOCK(dp->i_ump);
4798}
4799
4800/*
4801 * Called to track nlinkdelta of the inode and parent directories prior to
4802 * unlink.
4803 */
4804void
4805softdep_setup_unlink(dp, ip)
4806	struct inode *dp;
4807	struct inode *ip;
4808{
4809	struct vnode *dvp;
4810
4811	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4812	    ("softdep_setup_unlink called on non-softdep filesystem"));
4813	dvp = ITOV(dp);
4814	ACQUIRE_LOCK(dp->i_ump);
4815	(void) inodedep_lookup_ip(ip);
4816	(void) inodedep_lookup_ip(dp);
4817	softdep_prelink(dvp, ITOV(ip));
4818	FREE_LOCK(dp->i_ump);
4819}
4820
4821/*
4822 * Called to release the journal structures created by a failed non-directory
4823 * creation.  Adjusts nlinkdelta for non-journaling softdep.
4824 */
4825void
4826softdep_revert_create(dp, ip)
4827	struct inode *dp;
4828	struct inode *ip;
4829{
4830	struct inodedep *inodedep;
4831	struct jaddref *jaddref;
4832	struct vnode *dvp;
4833
4834	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4835	    ("softdep_revert_create called on non-softdep filesystem"));
4836	dvp = ITOV(dp);
4837	ACQUIRE_LOCK(dp->i_ump);
4838	inodedep = inodedep_lookup_ip(ip);
4839	if (DOINGSUJ(dvp)) {
4840		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4841		    inoreflst);
4842		KASSERT(jaddref->ja_parent == dp->i_number,
4843		    ("softdep_revert_create: addref parent mismatch"));
4844		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4845	}
4846	FREE_LOCK(dp->i_ump);
4847}
4848
4849/*
4850 * Called to release the journal structures created by a failed link
4851 * addition.  Adjusts nlinkdelta for non-journaling softdep.
4852 */
4853void
4854softdep_revert_link(dp, ip)
4855	struct inode *dp;
4856	struct inode *ip;
4857{
4858	struct inodedep *inodedep;
4859	struct jaddref *jaddref;
4860	struct vnode *dvp;
4861
4862	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4863	    ("softdep_revert_link called on non-softdep filesystem"));
4864	dvp = ITOV(dp);
4865	ACQUIRE_LOCK(dp->i_ump);
4866	inodedep = inodedep_lookup_ip(ip);
4867	if (DOINGSUJ(dvp)) {
4868		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4869		    inoreflst);
4870		KASSERT(jaddref->ja_parent == dp->i_number,
4871		    ("softdep_revert_link: addref parent mismatch"));
4872		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4873	}
4874	FREE_LOCK(dp->i_ump);
4875}
4876
4877/*
4878 * Called to release the journal structures created by a failed mkdir
4879 * attempt.  Adjusts nlinkdelta for non-journaling softdep.
4880 */
4881void
4882softdep_revert_mkdir(dp, ip)
4883	struct inode *dp;
4884	struct inode *ip;
4885{
4886	struct inodedep *inodedep;
4887	struct jaddref *jaddref;
4888	struct jaddref *dotaddref;
4889	struct vnode *dvp;
4890
4891	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4892	    ("softdep_revert_mkdir called on non-softdep filesystem"));
4893	dvp = ITOV(dp);
4894
4895	ACQUIRE_LOCK(dp->i_ump);
4896	inodedep = inodedep_lookup_ip(dp);
4897	if (DOINGSUJ(dvp)) {
4898		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4899		    inoreflst);
4900		KASSERT(jaddref->ja_parent == ip->i_number,
4901		    ("softdep_revert_mkdir: dotdot addref parent mismatch"));
4902		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4903	}
4904	inodedep = inodedep_lookup_ip(ip);
4905	if (DOINGSUJ(dvp)) {
4906		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4907		    inoreflst);
4908		KASSERT(jaddref->ja_parent == dp->i_number,
4909		    ("softdep_revert_mkdir: addref parent mismatch"));
4910		dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
4911		    inoreflst, if_deps);
4912		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4913		KASSERT(dotaddref->ja_parent == ip->i_number,
4914		    ("softdep_revert_mkdir: dot addref parent mismatch"));
4915		cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
4916	}
4917	FREE_LOCK(dp->i_ump);
4918}
4919
4920/*
4921 * Called to correct nlinkdelta after a failed rmdir.
4922 */
4923void
4924softdep_revert_rmdir(dp, ip)
4925	struct inode *dp;
4926	struct inode *ip;
4927{
4928
4929	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4930	    ("softdep_revert_rmdir called on non-softdep filesystem"));
4931	ACQUIRE_LOCK(dp->i_ump);
4932	(void) inodedep_lookup_ip(ip);
4933	(void) inodedep_lookup_ip(dp);
4934	FREE_LOCK(dp->i_ump);
4935}
4936
4937/*
4938 * Protecting the freemaps (or bitmaps).
4939 *
4940 * To eliminate the need to execute fsck before mounting a filesystem
4941 * after a power failure, one must (conservatively) guarantee that the
4942 * on-disk copy of the bitmaps never indicate that a live inode or block is
4943 * free.  So, when a block or inode is allocated, the bitmap should be
4944 * updated (on disk) before any new pointers.  When a block or inode is
4945 * freed, the bitmap should not be updated until all pointers have been
4946 * reset.  The latter dependency is handled by the delayed de-allocation
4947 * approach described below for block and inode de-allocation.  The former
4948 * dependency is handled by calling the following procedure when a block or
4949 * inode is allocated. When an inode is allocated an "inodedep" is created
4950 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
4951 * Each "inodedep" is also inserted into the hash indexing structure so
4952 * that any additional link additions can be made dependent on the inode
4953 * allocation.
4954 *
4955 * The ufs filesystem maintains a number of free block counts (e.g., per
4956 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
4957 * in addition to the bitmaps.  These counts are used to improve efficiency
4958 * during allocation and therefore must be consistent with the bitmaps.
4959 * There is no convenient way to guarantee post-crash consistency of these
4960 * counts with simple update ordering, for two main reasons: (1) The counts
4961 * and bitmaps for a single cylinder group block are not in the same disk
4962 * sector.  If a disk write is interrupted (e.g., by power failure), one may
4963 * be written and the other not.  (2) Some of the counts are located in the
4964 * superblock rather than the cylinder group block. So, we focus our soft
4965 * updates implementation on protecting the bitmaps. When mounting a
4966 * filesystem, we recompute the auxiliary counts from the bitmaps.
4967 */
4968
4969/*
4970 * Called just after updating the cylinder group block to allocate an inode.
4971 */
4972void
4973softdep_setup_inomapdep(bp, ip, newinum, mode)
4974	struct buf *bp;		/* buffer for cylgroup block with inode map */
4975	struct inode *ip;	/* inode related to allocation */
4976	ino_t newinum;		/* new inode number being allocated */
4977	int mode;
4978{
4979	struct inodedep *inodedep;
4980	struct bmsafemap *bmsafemap;
4981	struct jaddref *jaddref;
4982	struct mount *mp;
4983	struct fs *fs;
4984
4985	mp = UFSTOVFS(ip->i_ump);
4986	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
4987	    ("softdep_setup_inomapdep called on non-softdep filesystem"));
4988	fs = ip->i_ump->um_fs;
4989	jaddref = NULL;
4990
4991	/*
4992	 * Allocate the journal reference add structure so that the bitmap
4993	 * can be dependent on it.
4994	 */
4995	if (MOUNTEDSUJ(mp)) {
4996		jaddref = newjaddref(ip, newinum, 0, 0, mode);
4997		jaddref->ja_state |= NEWBLOCK;
4998	}
4999
5000	/*
5001	 * Create a dependency for the newly allocated inode.
5002	 * Panic if it already exists as something is seriously wrong.
5003	 * Otherwise add it to the dependency list for the buffer holding
5004	 * the cylinder group map from which it was allocated.
5005	 *
5006	 * We have to preallocate a bmsafemap entry in case it is needed
5007	 * in bmsafemap_lookup since once we allocate the inodedep, we
5008	 * have to finish initializing it before we can FREE_LOCK().
5009	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
5010	 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
5011	 * creating the inodedep as it can be freed during the time
5012	 * that we FREE_LOCK() while allocating the inodedep. We must
5013	 * call workitem_alloc() before entering the locked section as
5014	 * it also acquires the lock and we must avoid trying doing so
5015	 * recursively.
5016	 */
5017	bmsafemap = malloc(sizeof(struct bmsafemap),
5018	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5019	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5020	ACQUIRE_LOCK(ip->i_ump);
5021	if ((inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep)))
5022		panic("softdep_setup_inomapdep: dependency %p for new"
5023		    "inode already exists", inodedep);
5024	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
5025	if (jaddref) {
5026		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
5027		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
5028		    if_deps);
5029	} else {
5030		inodedep->id_state |= ONDEPLIST;
5031		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
5032	}
5033	inodedep->id_bmsafemap = bmsafemap;
5034	inodedep->id_state &= ~DEPCOMPLETE;
5035	FREE_LOCK(ip->i_ump);
5036}
5037
5038/*
5039 * Called just after updating the cylinder group block to
5040 * allocate block or fragment.
5041 */
5042void
5043softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
5044	struct buf *bp;		/* buffer for cylgroup block with block map */
5045	struct mount *mp;	/* filesystem doing allocation */
5046	ufs2_daddr_t newblkno;	/* number of newly allocated block */
5047	int frags;		/* Number of fragments. */
5048	int oldfrags;		/* Previous number of fragments for extend. */
5049{
5050	struct newblk *newblk;
5051	struct bmsafemap *bmsafemap;
5052	struct jnewblk *jnewblk;
5053	struct ufsmount *ump;
5054	struct fs *fs;
5055
5056	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5057	    ("softdep_setup_blkmapdep called on non-softdep filesystem"));
5058	ump = VFSTOUFS(mp);
5059	fs = ump->um_fs;
5060	jnewblk = NULL;
5061	/*
5062	 * Create a dependency for the newly allocated block.
5063	 * Add it to the dependency list for the buffer holding
5064	 * the cylinder group map from which it was allocated.
5065	 */
5066	if (MOUNTEDSUJ(mp)) {
5067		jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
5068		workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
5069		jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
5070		jnewblk->jn_state = ATTACHED;
5071		jnewblk->jn_blkno = newblkno;
5072		jnewblk->jn_frags = frags;
5073		jnewblk->jn_oldfrags = oldfrags;
5074#ifdef SUJ_DEBUG
5075		{
5076			struct cg *cgp;
5077			uint8_t *blksfree;
5078			long bno;
5079			int i;
5080
5081			cgp = (struct cg *)bp->b_data;
5082			blksfree = cg_blksfree(cgp);
5083			bno = dtogd(fs, jnewblk->jn_blkno);
5084			for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
5085			    i++) {
5086				if (isset(blksfree, bno + i))
5087					panic("softdep_setup_blkmapdep: "
5088					    "free fragment %d from %d-%d "
5089					    "state 0x%X dep %p", i,
5090					    jnewblk->jn_oldfrags,
5091					    jnewblk->jn_frags,
5092					    jnewblk->jn_state,
5093					    jnewblk->jn_dep);
5094			}
5095		}
5096#endif
5097	}
5098
5099	CTR3(KTR_SUJ,
5100	    "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
5101	    newblkno, frags, oldfrags);
5102	ACQUIRE_LOCK(ump);
5103	if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
5104		panic("softdep_setup_blkmapdep: found block");
5105	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp,
5106	    dtog(fs, newblkno), NULL);
5107	if (jnewblk) {
5108		jnewblk->jn_dep = (struct worklist *)newblk;
5109		LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
5110	} else {
5111		newblk->nb_state |= ONDEPLIST;
5112		LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
5113	}
5114	newblk->nb_bmsafemap = bmsafemap;
5115	newblk->nb_jnewblk = jnewblk;
5116	FREE_LOCK(ump);
5117}
5118
5119#define	BMSAFEMAP_HASH(ump, cg) \
5120      (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size])
5121
5122static int
5123bmsafemap_find(bmsafemaphd, cg, bmsafemapp)
5124	struct bmsafemap_hashhead *bmsafemaphd;
5125	int cg;
5126	struct bmsafemap **bmsafemapp;
5127{
5128	struct bmsafemap *bmsafemap;
5129
5130	LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
5131		if (bmsafemap->sm_cg == cg)
5132			break;
5133	if (bmsafemap) {
5134		*bmsafemapp = bmsafemap;
5135		return (1);
5136	}
5137	*bmsafemapp = NULL;
5138
5139	return (0);
5140}
5141
5142/*
5143 * Find the bmsafemap associated with a cylinder group buffer.
5144 * If none exists, create one. The buffer must be locked when
5145 * this routine is called and this routine must be called with
5146 * the softdep lock held. To avoid giving up the lock while
5147 * allocating a new bmsafemap, a preallocated bmsafemap may be
5148 * provided. If it is provided but not needed, it is freed.
5149 */
5150static struct bmsafemap *
5151bmsafemap_lookup(mp, bp, cg, newbmsafemap)
5152	struct mount *mp;
5153	struct buf *bp;
5154	int cg;
5155	struct bmsafemap *newbmsafemap;
5156{
5157	struct bmsafemap_hashhead *bmsafemaphd;
5158	struct bmsafemap *bmsafemap, *collision;
5159	struct worklist *wk;
5160	struct ufsmount *ump;
5161
5162	ump = VFSTOUFS(mp);
5163	LOCK_OWNED(ump);
5164	KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5165	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5166		if (wk->wk_type == D_BMSAFEMAP) {
5167			if (newbmsafemap)
5168				WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5169			return (WK_BMSAFEMAP(wk));
5170		}
5171	}
5172	bmsafemaphd = BMSAFEMAP_HASH(ump, cg);
5173	if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) {
5174		if (newbmsafemap)
5175			WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5176		return (bmsafemap);
5177	}
5178	if (newbmsafemap) {
5179		bmsafemap = newbmsafemap;
5180	} else {
5181		FREE_LOCK(ump);
5182		bmsafemap = malloc(sizeof(struct bmsafemap),
5183			M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5184		workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5185		ACQUIRE_LOCK(ump);
5186	}
5187	bmsafemap->sm_buf = bp;
5188	LIST_INIT(&bmsafemap->sm_inodedephd);
5189	LIST_INIT(&bmsafemap->sm_inodedepwr);
5190	LIST_INIT(&bmsafemap->sm_newblkhd);
5191	LIST_INIT(&bmsafemap->sm_newblkwr);
5192	LIST_INIT(&bmsafemap->sm_jaddrefhd);
5193	LIST_INIT(&bmsafemap->sm_jnewblkhd);
5194	LIST_INIT(&bmsafemap->sm_freehd);
5195	LIST_INIT(&bmsafemap->sm_freewr);
5196	if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) {
5197		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5198		return (collision);
5199	}
5200	bmsafemap->sm_cg = cg;
5201	LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5202	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
5203	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5204	return (bmsafemap);
5205}
5206
5207/*
5208 * Direct block allocation dependencies.
5209 *
5210 * When a new block is allocated, the corresponding disk locations must be
5211 * initialized (with zeros or new data) before the on-disk inode points to
5212 * them.  Also, the freemap from which the block was allocated must be
5213 * updated (on disk) before the inode's pointer. These two dependencies are
5214 * independent of each other and are needed for all file blocks and indirect
5215 * blocks that are pointed to directly by the inode.  Just before the
5216 * "in-core" version of the inode is updated with a newly allocated block
5217 * number, a procedure (below) is called to setup allocation dependency
5218 * structures.  These structures are removed when the corresponding
5219 * dependencies are satisfied or when the block allocation becomes obsolete
5220 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5221 * fragment that gets upgraded).  All of these cases are handled in
5222 * procedures described later.
5223 *
5224 * When a file extension causes a fragment to be upgraded, either to a larger
5225 * fragment or to a full block, the on-disk location may change (if the
5226 * previous fragment could not simply be extended). In this case, the old
5227 * fragment must be de-allocated, but not until after the inode's pointer has
5228 * been updated. In most cases, this is handled by later procedures, which
5229 * will construct a "freefrag" structure to be added to the workitem queue
5230 * when the inode update is complete (or obsolete).  The main exception to
5231 * this is when an allocation occurs while a pending allocation dependency
5232 * (for the same block pointer) remains.  This case is handled in the main
5233 * allocation dependency setup procedure by immediately freeing the
5234 * unreferenced fragments.
5235 */
5236void
5237softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5238	struct inode *ip;	/* inode to which block is being added */
5239	ufs_lbn_t off;		/* block pointer within inode */
5240	ufs2_daddr_t newblkno;	/* disk block number being added */
5241	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
5242	long newsize;		/* size of new block */
5243	long oldsize;		/* size of new block */
5244	struct buf *bp;		/* bp for allocated block */
5245{
5246	struct allocdirect *adp, *oldadp;
5247	struct allocdirectlst *adphead;
5248	struct freefrag *freefrag;
5249	struct inodedep *inodedep;
5250	struct pagedep *pagedep;
5251	struct jnewblk *jnewblk;
5252	struct newblk *newblk;
5253	struct mount *mp;
5254	ufs_lbn_t lbn;
5255
5256	lbn = bp->b_lblkno;
5257	mp = UFSTOVFS(ip->i_ump);
5258	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5259	    ("softdep_setup_allocdirect called on non-softdep filesystem"));
5260	if (oldblkno && oldblkno != newblkno)
5261		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5262	else
5263		freefrag = NULL;
5264
5265	CTR6(KTR_SUJ,
5266	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5267	    "off %jd newsize %ld oldsize %d",
5268	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5269	ACQUIRE_LOCK(ip->i_ump);
5270	if (off >= NDADDR) {
5271		if (lbn > 0)
5272			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5273			    lbn, off);
5274		/* allocating an indirect block */
5275		if (oldblkno != 0)
5276			panic("softdep_setup_allocdirect: non-zero indir");
5277	} else {
5278		if (off != lbn)
5279			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5280			    lbn, off);
5281		/*
5282		 * Allocating a direct block.
5283		 *
5284		 * If we are allocating a directory block, then we must
5285		 * allocate an associated pagedep to track additions and
5286		 * deletions.
5287		 */
5288		if ((ip->i_mode & IFMT) == IFDIR)
5289			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5290			    &pagedep);
5291	}
5292	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5293		panic("softdep_setup_allocdirect: lost block");
5294	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5295	    ("softdep_setup_allocdirect: newblk already initialized"));
5296	/*
5297	 * Convert the newblk to an allocdirect.
5298	 */
5299	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5300	adp = (struct allocdirect *)newblk;
5301	newblk->nb_freefrag = freefrag;
5302	adp->ad_offset = off;
5303	adp->ad_oldblkno = oldblkno;
5304	adp->ad_newsize = newsize;
5305	adp->ad_oldsize = oldsize;
5306
5307	/*
5308	 * Finish initializing the journal.
5309	 */
5310	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5311		jnewblk->jn_ino = ip->i_number;
5312		jnewblk->jn_lbn = lbn;
5313		add_to_journal(&jnewblk->jn_list);
5314	}
5315	if (freefrag && freefrag->ff_jdep != NULL &&
5316	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5317		add_to_journal(freefrag->ff_jdep);
5318	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5319	adp->ad_inodedep = inodedep;
5320
5321	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5322	/*
5323	 * The list of allocdirects must be kept in sorted and ascending
5324	 * order so that the rollback routines can quickly determine the
5325	 * first uncommitted block (the size of the file stored on disk
5326	 * ends at the end of the lowest committed fragment, or if there
5327	 * are no fragments, at the end of the highest committed block).
5328	 * Since files generally grow, the typical case is that the new
5329	 * block is to be added at the end of the list. We speed this
5330	 * special case by checking against the last allocdirect in the
5331	 * list before laboriously traversing the list looking for the
5332	 * insertion point.
5333	 */
5334	adphead = &inodedep->id_newinoupdt;
5335	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5336	if (oldadp == NULL || oldadp->ad_offset <= off) {
5337		/* insert at end of list */
5338		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5339		if (oldadp != NULL && oldadp->ad_offset == off)
5340			allocdirect_merge(adphead, adp, oldadp);
5341		FREE_LOCK(ip->i_ump);
5342		return;
5343	}
5344	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5345		if (oldadp->ad_offset >= off)
5346			break;
5347	}
5348	if (oldadp == NULL)
5349		panic("softdep_setup_allocdirect: lost entry");
5350	/* insert in middle of list */
5351	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5352	if (oldadp->ad_offset == off)
5353		allocdirect_merge(adphead, adp, oldadp);
5354
5355	FREE_LOCK(ip->i_ump);
5356}
5357
5358/*
5359 * Merge a newer and older journal record to be stored either in a
5360 * newblock or freefrag.  This handles aggregating journal records for
5361 * fragment allocation into a second record as well as replacing a
5362 * journal free with an aborted journal allocation.  A segment for the
5363 * oldest record will be placed on wkhd if it has been written.  If not
5364 * the segment for the newer record will suffice.
5365 */
5366static struct worklist *
5367jnewblk_merge(new, old, wkhd)
5368	struct worklist *new;
5369	struct worklist *old;
5370	struct workhead *wkhd;
5371{
5372	struct jnewblk *njnewblk;
5373	struct jnewblk *jnewblk;
5374
5375	/* Handle NULLs to simplify callers. */
5376	if (new == NULL)
5377		return (old);
5378	if (old == NULL)
5379		return (new);
5380	/* Replace a jfreefrag with a jnewblk. */
5381	if (new->wk_type == D_JFREEFRAG) {
5382		if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5383			panic("jnewblk_merge: blkno mismatch: %p, %p",
5384			    old, new);
5385		cancel_jfreefrag(WK_JFREEFRAG(new));
5386		return (old);
5387	}
5388	if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5389		panic("jnewblk_merge: Bad type: old %d new %d\n",
5390		    old->wk_type, new->wk_type);
5391	/*
5392	 * Handle merging of two jnewblk records that describe
5393	 * different sets of fragments in the same block.
5394	 */
5395	jnewblk = WK_JNEWBLK(old);
5396	njnewblk = WK_JNEWBLK(new);
5397	if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5398		panic("jnewblk_merge: Merging disparate blocks.");
5399	/*
5400	 * The record may be rolled back in the cg.
5401	 */
5402	if (jnewblk->jn_state & UNDONE) {
5403		jnewblk->jn_state &= ~UNDONE;
5404		njnewblk->jn_state |= UNDONE;
5405		njnewblk->jn_state &= ~ATTACHED;
5406	}
5407	/*
5408	 * We modify the newer addref and free the older so that if neither
5409	 * has been written the most up-to-date copy will be on disk.  If
5410	 * both have been written but rolled back we only temporarily need
5411	 * one of them to fix the bits when the cg write completes.
5412	 */
5413	jnewblk->jn_state |= ATTACHED | COMPLETE;
5414	njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5415	cancel_jnewblk(jnewblk, wkhd);
5416	WORKLIST_REMOVE(&jnewblk->jn_list);
5417	free_jnewblk(jnewblk);
5418	return (new);
5419}
5420
5421/*
5422 * Replace an old allocdirect dependency with a newer one.
5423 * This routine must be called with splbio interrupts blocked.
5424 */
5425static void
5426allocdirect_merge(adphead, newadp, oldadp)
5427	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
5428	struct allocdirect *newadp;	/* allocdirect being added */
5429	struct allocdirect *oldadp;	/* existing allocdirect being checked */
5430{
5431	struct worklist *wk;
5432	struct freefrag *freefrag;
5433
5434	freefrag = NULL;
5435	LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp));
5436	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5437	    newadp->ad_oldsize != oldadp->ad_newsize ||
5438	    newadp->ad_offset >= NDADDR)
5439		panic("%s %jd != new %jd || old size %ld != new %ld",
5440		    "allocdirect_merge: old blkno",
5441		    (intmax_t)newadp->ad_oldblkno,
5442		    (intmax_t)oldadp->ad_newblkno,
5443		    newadp->ad_oldsize, oldadp->ad_newsize);
5444	newadp->ad_oldblkno = oldadp->ad_oldblkno;
5445	newadp->ad_oldsize = oldadp->ad_oldsize;
5446	/*
5447	 * If the old dependency had a fragment to free or had never
5448	 * previously had a block allocated, then the new dependency
5449	 * can immediately post its freefrag and adopt the old freefrag.
5450	 * This action is done by swapping the freefrag dependencies.
5451	 * The new dependency gains the old one's freefrag, and the
5452	 * old one gets the new one and then immediately puts it on
5453	 * the worklist when it is freed by free_newblk. It is
5454	 * not possible to do this swap when the old dependency had a
5455	 * non-zero size but no previous fragment to free. This condition
5456	 * arises when the new block is an extension of the old block.
5457	 * Here, the first part of the fragment allocated to the new
5458	 * dependency is part of the block currently claimed on disk by
5459	 * the old dependency, so cannot legitimately be freed until the
5460	 * conditions for the new dependency are fulfilled.
5461	 */
5462	freefrag = newadp->ad_freefrag;
5463	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5464		newadp->ad_freefrag = oldadp->ad_freefrag;
5465		oldadp->ad_freefrag = freefrag;
5466	}
5467	/*
5468	 * If we are tracking a new directory-block allocation,
5469	 * move it from the old allocdirect to the new allocdirect.
5470	 */
5471	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5472		WORKLIST_REMOVE(wk);
5473		if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5474			panic("allocdirect_merge: extra newdirblk");
5475		WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5476	}
5477	TAILQ_REMOVE(adphead, oldadp, ad_next);
5478	/*
5479	 * We need to move any journal dependencies over to the freefrag
5480	 * that releases this block if it exists.  Otherwise we are
5481	 * extending an existing block and we'll wait until that is
5482	 * complete to release the journal space and extend the
5483	 * new journal to cover this old space as well.
5484	 */
5485	if (freefrag == NULL) {
5486		if (oldadp->ad_newblkno != newadp->ad_newblkno)
5487			panic("allocdirect_merge: %jd != %jd",
5488			    oldadp->ad_newblkno, newadp->ad_newblkno);
5489		newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5490		    jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5491		    &oldadp->ad_block.nb_jnewblk->jn_list,
5492		    &newadp->ad_block.nb_jwork);
5493		oldadp->ad_block.nb_jnewblk = NULL;
5494		cancel_newblk(&oldadp->ad_block, NULL,
5495		    &newadp->ad_block.nb_jwork);
5496	} else {
5497		wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5498		    &freefrag->ff_list, &freefrag->ff_jwork);
5499		freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5500		    &freefrag->ff_jwork);
5501	}
5502	free_newblk(&oldadp->ad_block);
5503}
5504
5505/*
5506 * Allocate a jfreefrag structure to journal a single block free.
5507 */
5508static struct jfreefrag *
5509newjfreefrag(freefrag, ip, blkno, size, lbn)
5510	struct freefrag *freefrag;
5511	struct inode *ip;
5512	ufs2_daddr_t blkno;
5513	long size;
5514	ufs_lbn_t lbn;
5515{
5516	struct jfreefrag *jfreefrag;
5517	struct fs *fs;
5518
5519	fs = ip->i_fs;
5520	jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5521	    M_SOFTDEP_FLAGS);
5522	workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump));
5523	jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5524	jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5525	jfreefrag->fr_ino = ip->i_number;
5526	jfreefrag->fr_lbn = lbn;
5527	jfreefrag->fr_blkno = blkno;
5528	jfreefrag->fr_frags = numfrags(fs, size);
5529	jfreefrag->fr_freefrag = freefrag;
5530
5531	return (jfreefrag);
5532}
5533
5534/*
5535 * Allocate a new freefrag structure.
5536 */
5537static struct freefrag *
5538newfreefrag(ip, blkno, size, lbn)
5539	struct inode *ip;
5540	ufs2_daddr_t blkno;
5541	long size;
5542	ufs_lbn_t lbn;
5543{
5544	struct freefrag *freefrag;
5545	struct fs *fs;
5546
5547	CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5548	    ip->i_number, blkno, size, lbn);
5549	fs = ip->i_fs;
5550	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5551		panic("newfreefrag: frag size");
5552	freefrag = malloc(sizeof(struct freefrag),
5553	    M_FREEFRAG, M_SOFTDEP_FLAGS);
5554	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
5555	freefrag->ff_state = ATTACHED;
5556	LIST_INIT(&freefrag->ff_jwork);
5557	freefrag->ff_inum = ip->i_number;
5558	freefrag->ff_vtype = ITOV(ip)->v_type;
5559	freefrag->ff_blkno = blkno;
5560	freefrag->ff_fragsize = size;
5561
5562	if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) {
5563		freefrag->ff_jdep = (struct worklist *)
5564		    newjfreefrag(freefrag, ip, blkno, size, lbn);
5565	} else {
5566		freefrag->ff_state |= DEPCOMPLETE;
5567		freefrag->ff_jdep = NULL;
5568	}
5569
5570	return (freefrag);
5571}
5572
5573/*
5574 * This workitem de-allocates fragments that were replaced during
5575 * file block allocation.
5576 */
5577static void
5578handle_workitem_freefrag(freefrag)
5579	struct freefrag *freefrag;
5580{
5581	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5582	struct workhead wkhd;
5583
5584	CTR3(KTR_SUJ,
5585	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5586	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5587	/*
5588	 * It would be illegal to add new completion items to the
5589	 * freefrag after it was schedule to be done so it must be
5590	 * safe to modify the list head here.
5591	 */
5592	LIST_INIT(&wkhd);
5593	ACQUIRE_LOCK(ump);
5594	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5595	/*
5596	 * If the journal has not been written we must cancel it here.
5597	 */
5598	if (freefrag->ff_jdep) {
5599		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5600			panic("handle_workitem_freefrag: Unexpected type %d\n",
5601			    freefrag->ff_jdep->wk_type);
5602		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5603	}
5604	FREE_LOCK(ump);
5605	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5606	   freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5607	ACQUIRE_LOCK(ump);
5608	WORKITEM_FREE(freefrag, D_FREEFRAG);
5609	FREE_LOCK(ump);
5610}
5611
5612/*
5613 * Set up a dependency structure for an external attributes data block.
5614 * This routine follows much of the structure of softdep_setup_allocdirect.
5615 * See the description of softdep_setup_allocdirect above for details.
5616 */
5617void
5618softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5619	struct inode *ip;
5620	ufs_lbn_t off;
5621	ufs2_daddr_t newblkno;
5622	ufs2_daddr_t oldblkno;
5623	long newsize;
5624	long oldsize;
5625	struct buf *bp;
5626{
5627	struct allocdirect *adp, *oldadp;
5628	struct allocdirectlst *adphead;
5629	struct freefrag *freefrag;
5630	struct inodedep *inodedep;
5631	struct jnewblk *jnewblk;
5632	struct newblk *newblk;
5633	struct mount *mp;
5634	ufs_lbn_t lbn;
5635
5636	mp = UFSTOVFS(ip->i_ump);
5637	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5638	    ("softdep_setup_allocext called on non-softdep filesystem"));
5639	KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld > NXADDR",
5640		    (long long)off));
5641
5642	lbn = bp->b_lblkno;
5643	if (oldblkno && oldblkno != newblkno)
5644		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5645	else
5646		freefrag = NULL;
5647
5648	ACQUIRE_LOCK(ip->i_ump);
5649	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5650		panic("softdep_setup_allocext: lost block");
5651	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5652	    ("softdep_setup_allocext: newblk already initialized"));
5653	/*
5654	 * Convert the newblk to an allocdirect.
5655	 */
5656	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5657	adp = (struct allocdirect *)newblk;
5658	newblk->nb_freefrag = freefrag;
5659	adp->ad_offset = off;
5660	adp->ad_oldblkno = oldblkno;
5661	adp->ad_newsize = newsize;
5662	adp->ad_oldsize = oldsize;
5663	adp->ad_state |=  EXTDATA;
5664
5665	/*
5666	 * Finish initializing the journal.
5667	 */
5668	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5669		jnewblk->jn_ino = ip->i_number;
5670		jnewblk->jn_lbn = lbn;
5671		add_to_journal(&jnewblk->jn_list);
5672	}
5673	if (freefrag && freefrag->ff_jdep != NULL &&
5674	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5675		add_to_journal(freefrag->ff_jdep);
5676	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5677	adp->ad_inodedep = inodedep;
5678
5679	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5680	/*
5681	 * The list of allocdirects must be kept in sorted and ascending
5682	 * order so that the rollback routines can quickly determine the
5683	 * first uncommitted block (the size of the file stored on disk
5684	 * ends at the end of the lowest committed fragment, or if there
5685	 * are no fragments, at the end of the highest committed block).
5686	 * Since files generally grow, the typical case is that the new
5687	 * block is to be added at the end of the list. We speed this
5688	 * special case by checking against the last allocdirect in the
5689	 * list before laboriously traversing the list looking for the
5690	 * insertion point.
5691	 */
5692	adphead = &inodedep->id_newextupdt;
5693	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5694	if (oldadp == NULL || oldadp->ad_offset <= off) {
5695		/* insert at end of list */
5696		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5697		if (oldadp != NULL && oldadp->ad_offset == off)
5698			allocdirect_merge(adphead, adp, oldadp);
5699		FREE_LOCK(ip->i_ump);
5700		return;
5701	}
5702	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5703		if (oldadp->ad_offset >= off)
5704			break;
5705	}
5706	if (oldadp == NULL)
5707		panic("softdep_setup_allocext: lost entry");
5708	/* insert in middle of list */
5709	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5710	if (oldadp->ad_offset == off)
5711		allocdirect_merge(adphead, adp, oldadp);
5712	FREE_LOCK(ip->i_ump);
5713}
5714
5715/*
5716 * Indirect block allocation dependencies.
5717 *
5718 * The same dependencies that exist for a direct block also exist when
5719 * a new block is allocated and pointed to by an entry in a block of
5720 * indirect pointers. The undo/redo states described above are also
5721 * used here. Because an indirect block contains many pointers that
5722 * may have dependencies, a second copy of the entire in-memory indirect
5723 * block is kept. The buffer cache copy is always completely up-to-date.
5724 * The second copy, which is used only as a source for disk writes,
5725 * contains only the safe pointers (i.e., those that have no remaining
5726 * update dependencies). The second copy is freed when all pointers
5727 * are safe. The cache is not allowed to replace indirect blocks with
5728 * pending update dependencies. If a buffer containing an indirect
5729 * block with dependencies is written, these routines will mark it
5730 * dirty again. It can only be successfully written once all the
5731 * dependencies are removed. The ffs_fsync routine in conjunction with
5732 * softdep_sync_metadata work together to get all the dependencies
5733 * removed so that a file can be successfully written to disk. Three
5734 * procedures are used when setting up indirect block pointer
5735 * dependencies. The division is necessary because of the organization
5736 * of the "balloc" routine and because of the distinction between file
5737 * pages and file metadata blocks.
5738 */
5739
5740/*
5741 * Allocate a new allocindir structure.
5742 */
5743static struct allocindir *
5744newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
5745	struct inode *ip;	/* inode for file being extended */
5746	int ptrno;		/* offset of pointer in indirect block */
5747	ufs2_daddr_t newblkno;	/* disk block number being added */
5748	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5749	ufs_lbn_t lbn;
5750{
5751	struct newblk *newblk;
5752	struct allocindir *aip;
5753	struct freefrag *freefrag;
5754	struct jnewblk *jnewblk;
5755
5756	if (oldblkno)
5757		freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn);
5758	else
5759		freefrag = NULL;
5760	ACQUIRE_LOCK(ip->i_ump);
5761	if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0)
5762		panic("new_allocindir: lost block");
5763	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5764	    ("newallocindir: newblk already initialized"));
5765	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
5766	newblk->nb_freefrag = freefrag;
5767	aip = (struct allocindir *)newblk;
5768	aip->ai_offset = ptrno;
5769	aip->ai_oldblkno = oldblkno;
5770	aip->ai_lbn = lbn;
5771	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5772		jnewblk->jn_ino = ip->i_number;
5773		jnewblk->jn_lbn = lbn;
5774		add_to_journal(&jnewblk->jn_list);
5775	}
5776	if (freefrag && freefrag->ff_jdep != NULL &&
5777	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5778		add_to_journal(freefrag->ff_jdep);
5779	return (aip);
5780}
5781
5782/*
5783 * Called just before setting an indirect block pointer
5784 * to a newly allocated file page.
5785 */
5786void
5787softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
5788	struct inode *ip;	/* inode for file being extended */
5789	ufs_lbn_t lbn;		/* allocated block number within file */
5790	struct buf *bp;		/* buffer with indirect blk referencing page */
5791	int ptrno;		/* offset of pointer in indirect block */
5792	ufs2_daddr_t newblkno;	/* disk block number being added */
5793	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5794	struct buf *nbp;	/* buffer holding allocated page */
5795{
5796	struct inodedep *inodedep;
5797	struct freefrag *freefrag;
5798	struct allocindir *aip;
5799	struct pagedep *pagedep;
5800	struct mount *mp;
5801	int dflags;
5802
5803	mp = UFSTOVFS(ip->i_ump);
5804	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5805	    ("softdep_setup_allocindir_page called on non-softdep filesystem"));
5806	KASSERT(lbn == nbp->b_lblkno,
5807	    ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
5808	    lbn, bp->b_lblkno));
5809	CTR4(KTR_SUJ,
5810	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
5811	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
5812	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
5813	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
5814	dflags = DEPALLOC;
5815	if (IS_SNAPSHOT(ip))
5816		dflags |= NODELAY;
5817	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
5818	/*
5819	 * If we are allocating a directory page, then we must
5820	 * allocate an associated pagedep to track additions and
5821	 * deletions.
5822	 */
5823	if ((ip->i_mode & IFMT) == IFDIR)
5824		pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
5825	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5826	freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
5827	FREE_LOCK(ip->i_ump);
5828	if (freefrag)
5829		handle_workitem_freefrag(freefrag);
5830}
5831
5832/*
5833 * Called just before setting an indirect block pointer to a
5834 * newly allocated indirect block.
5835 */
5836void
5837softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
5838	struct buf *nbp;	/* newly allocated indirect block */
5839	struct inode *ip;	/* inode for file being extended */
5840	struct buf *bp;		/* indirect block referencing allocated block */
5841	int ptrno;		/* offset of pointer in indirect block */
5842	ufs2_daddr_t newblkno;	/* disk block number being added */
5843{
5844	struct inodedep *inodedep;
5845	struct allocindir *aip;
5846	ufs_lbn_t lbn;
5847	int dflags;
5848
5849	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
5850	    ("softdep_setup_allocindir_meta called on non-softdep filesystem"));
5851	CTR3(KTR_SUJ,
5852	    "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
5853	    ip->i_number, newblkno, ptrno);
5854	lbn = nbp->b_lblkno;
5855	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
5856	aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
5857	dflags = DEPALLOC;
5858	if (IS_SNAPSHOT(ip))
5859		dflags |= NODELAY;
5860	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
5861	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5862	if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
5863		panic("softdep_setup_allocindir_meta: Block already existed");
5864	FREE_LOCK(ip->i_ump);
5865}
5866
5867static void
5868indirdep_complete(indirdep)
5869	struct indirdep *indirdep;
5870{
5871	struct allocindir *aip;
5872
5873	LIST_REMOVE(indirdep, ir_next);
5874	indirdep->ir_state |= DEPCOMPLETE;
5875
5876	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
5877		LIST_REMOVE(aip, ai_next);
5878		free_newblk(&aip->ai_block);
5879	}
5880	/*
5881	 * If this indirdep is not attached to a buf it was simply waiting
5882	 * on completion to clear completehd.  free_indirdep() asserts
5883	 * that nothing is dangling.
5884	 */
5885	if ((indirdep->ir_state & ONWORKLIST) == 0)
5886		free_indirdep(indirdep);
5887}
5888
5889static struct indirdep *
5890indirdep_lookup(mp, ip, bp)
5891	struct mount *mp;
5892	struct inode *ip;
5893	struct buf *bp;
5894{
5895	struct indirdep *indirdep, *newindirdep;
5896	struct newblk *newblk;
5897	struct ufsmount *ump;
5898	struct worklist *wk;
5899	struct fs *fs;
5900	ufs2_daddr_t blkno;
5901
5902	ump = VFSTOUFS(mp);
5903	LOCK_OWNED(ump);
5904	indirdep = NULL;
5905	newindirdep = NULL;
5906	fs = ip->i_fs;
5907	for (;;) {
5908		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5909			if (wk->wk_type != D_INDIRDEP)
5910				continue;
5911			indirdep = WK_INDIRDEP(wk);
5912			break;
5913		}
5914		/* Found on the buffer worklist, no new structure to free. */
5915		if (indirdep != NULL && newindirdep == NULL)
5916			return (indirdep);
5917		if (indirdep != NULL && newindirdep != NULL)
5918			panic("indirdep_lookup: simultaneous create");
5919		/* None found on the buffer and a new structure is ready. */
5920		if (indirdep == NULL && newindirdep != NULL)
5921			break;
5922		/* None found and no new structure available. */
5923		FREE_LOCK(ump);
5924		newindirdep = malloc(sizeof(struct indirdep),
5925		    M_INDIRDEP, M_SOFTDEP_FLAGS);
5926		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
5927		newindirdep->ir_state = ATTACHED;
5928		if (ip->i_ump->um_fstype == UFS1)
5929			newindirdep->ir_state |= UFS1FMT;
5930		TAILQ_INIT(&newindirdep->ir_trunc);
5931		newindirdep->ir_saveddata = NULL;
5932		LIST_INIT(&newindirdep->ir_deplisthd);
5933		LIST_INIT(&newindirdep->ir_donehd);
5934		LIST_INIT(&newindirdep->ir_writehd);
5935		LIST_INIT(&newindirdep->ir_completehd);
5936		if (bp->b_blkno == bp->b_lblkno) {
5937			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
5938			    NULL, NULL);
5939			bp->b_blkno = blkno;
5940		}
5941		newindirdep->ir_freeblks = NULL;
5942		newindirdep->ir_savebp =
5943		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
5944		newindirdep->ir_bp = bp;
5945		BUF_KERNPROC(newindirdep->ir_savebp);
5946		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
5947		ACQUIRE_LOCK(ump);
5948	}
5949	indirdep = newindirdep;
5950	WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
5951	/*
5952	 * If the block is not yet allocated we don't set DEPCOMPLETE so
5953	 * that we don't free dependencies until the pointers are valid.
5954	 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
5955	 * than using the hash.
5956	 */
5957	if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
5958		LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
5959	else
5960		indirdep->ir_state |= DEPCOMPLETE;
5961	return (indirdep);
5962}
5963
5964/*
5965 * Called to finish the allocation of the "aip" allocated
5966 * by one of the two routines above.
5967 */
5968static struct freefrag *
5969setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
5970	struct buf *bp;		/* in-memory copy of the indirect block */
5971	struct inode *ip;	/* inode for file being extended */
5972	struct inodedep *inodedep; /* Inodedep for ip */
5973	struct allocindir *aip;	/* allocindir allocated by the above routines */
5974	ufs_lbn_t lbn;		/* Logical block number for this block. */
5975{
5976	struct fs *fs;
5977	struct indirdep *indirdep;
5978	struct allocindir *oldaip;
5979	struct freefrag *freefrag;
5980	struct mount *mp;
5981
5982	LOCK_OWNED(ip->i_ump);
5983	mp = UFSTOVFS(ip->i_ump);
5984	fs = ip->i_fs;
5985	if (bp->b_lblkno >= 0)
5986		panic("setup_allocindir_phase2: not indir blk");
5987	KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
5988	    ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
5989	indirdep = indirdep_lookup(mp, ip, bp);
5990	KASSERT(indirdep->ir_savebp != NULL,
5991	    ("setup_allocindir_phase2 NULL ir_savebp"));
5992	aip->ai_indirdep = indirdep;
5993	/*
5994	 * Check for an unwritten dependency for this indirect offset.  If
5995	 * there is, merge the old dependency into the new one.  This happens
5996	 * as a result of reallocblk only.
5997	 */
5998	freefrag = NULL;
5999	if (aip->ai_oldblkno != 0) {
6000		LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
6001			if (oldaip->ai_offset == aip->ai_offset) {
6002				freefrag = allocindir_merge(aip, oldaip);
6003				goto done;
6004			}
6005		}
6006		LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
6007			if (oldaip->ai_offset == aip->ai_offset) {
6008				freefrag = allocindir_merge(aip, oldaip);
6009				goto done;
6010			}
6011		}
6012	}
6013done:
6014	LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
6015	return (freefrag);
6016}
6017
6018/*
6019 * Merge two allocindirs which refer to the same block.  Move newblock
6020 * dependencies and setup the freefrags appropriately.
6021 */
6022static struct freefrag *
6023allocindir_merge(aip, oldaip)
6024	struct allocindir *aip;
6025	struct allocindir *oldaip;
6026{
6027	struct freefrag *freefrag;
6028	struct worklist *wk;
6029
6030	if (oldaip->ai_newblkno != aip->ai_oldblkno)
6031		panic("allocindir_merge: blkno");
6032	aip->ai_oldblkno = oldaip->ai_oldblkno;
6033	freefrag = aip->ai_freefrag;
6034	aip->ai_freefrag = oldaip->ai_freefrag;
6035	oldaip->ai_freefrag = NULL;
6036	KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag"));
6037	/*
6038	 * If we are tracking a new directory-block allocation,
6039	 * move it from the old allocindir to the new allocindir.
6040	 */
6041	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
6042		WORKLIST_REMOVE(wk);
6043		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
6044			panic("allocindir_merge: extra newdirblk");
6045		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
6046	}
6047	/*
6048	 * We can skip journaling for this freefrag and just complete
6049	 * any pending journal work for the allocindir that is being
6050	 * removed after the freefrag completes.
6051	 */
6052	if (freefrag->ff_jdep)
6053		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
6054	LIST_REMOVE(oldaip, ai_next);
6055	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
6056	    &freefrag->ff_list, &freefrag->ff_jwork);
6057	free_newblk(&oldaip->ai_block);
6058
6059	return (freefrag);
6060}
6061
6062static inline void
6063setup_freedirect(freeblks, ip, i, needj)
6064	struct freeblks *freeblks;
6065	struct inode *ip;
6066	int i;
6067	int needj;
6068{
6069	ufs2_daddr_t blkno;
6070	int frags;
6071
6072	blkno = DIP(ip, i_db[i]);
6073	if (blkno == 0)
6074		return;
6075	DIP_SET(ip, i_db[i], 0);
6076	frags = sblksize(ip->i_fs, ip->i_size, i);
6077	frags = numfrags(ip->i_fs, frags);
6078	newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj);
6079}
6080
6081static inline void
6082setup_freeext(freeblks, ip, i, needj)
6083	struct freeblks *freeblks;
6084	struct inode *ip;
6085	int i;
6086	int needj;
6087{
6088	ufs2_daddr_t blkno;
6089	int frags;
6090
6091	blkno = ip->i_din2->di_extb[i];
6092	if (blkno == 0)
6093		return;
6094	ip->i_din2->di_extb[i] = 0;
6095	frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i);
6096	frags = numfrags(ip->i_fs, frags);
6097	newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
6098}
6099
6100static inline void
6101setup_freeindir(freeblks, ip, i, lbn, needj)
6102	struct freeblks *freeblks;
6103	struct inode *ip;
6104	int i;
6105	ufs_lbn_t lbn;
6106	int needj;
6107{
6108	ufs2_daddr_t blkno;
6109
6110	blkno = DIP(ip, i_ib[i]);
6111	if (blkno == 0)
6112		return;
6113	DIP_SET(ip, i_ib[i], 0);
6114	newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag,
6115	    0, needj);
6116}
6117
6118static inline struct freeblks *
6119newfreeblks(mp, ip)
6120	struct mount *mp;
6121	struct inode *ip;
6122{
6123	struct freeblks *freeblks;
6124
6125	freeblks = malloc(sizeof(struct freeblks),
6126		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
6127	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
6128	LIST_INIT(&freeblks->fb_jblkdephd);
6129	LIST_INIT(&freeblks->fb_jwork);
6130	freeblks->fb_ref = 0;
6131	freeblks->fb_cgwait = 0;
6132	freeblks->fb_state = ATTACHED;
6133	freeblks->fb_uid = ip->i_uid;
6134	freeblks->fb_inum = ip->i_number;
6135	freeblks->fb_vtype = ITOV(ip)->v_type;
6136	freeblks->fb_modrev = DIP(ip, i_modrev);
6137	freeblks->fb_devvp = ip->i_devvp;
6138	freeblks->fb_chkcnt = 0;
6139	freeblks->fb_len = 0;
6140
6141	return (freeblks);
6142}
6143
6144static void
6145trunc_indirdep(indirdep, freeblks, bp, off)
6146	struct indirdep *indirdep;
6147	struct freeblks *freeblks;
6148	struct buf *bp;
6149	int off;
6150{
6151	struct allocindir *aip, *aipn;
6152
6153	/*
6154	 * The first set of allocindirs won't be in savedbp.
6155	 */
6156	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6157		if (aip->ai_offset > off)
6158			cancel_allocindir(aip, bp, freeblks, 1);
6159	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6160		if (aip->ai_offset > off)
6161			cancel_allocindir(aip, bp, freeblks, 1);
6162	/*
6163	 * These will exist in savedbp.
6164	 */
6165	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6166		if (aip->ai_offset > off)
6167			cancel_allocindir(aip, NULL, freeblks, 0);
6168	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6169		if (aip->ai_offset > off)
6170			cancel_allocindir(aip, NULL, freeblks, 0);
6171}
6172
6173/*
6174 * Follow the chain of indirects down to lastlbn creating a freework
6175 * structure for each.  This will be used to start indir_trunc() at
6176 * the right offset and create the journal records for the parrtial
6177 * truncation.  A second step will handle the truncated dependencies.
6178 */
6179static int
6180setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
6181	struct freeblks *freeblks;
6182	struct inode *ip;
6183	ufs_lbn_t lbn;
6184	ufs_lbn_t lastlbn;
6185	ufs2_daddr_t blkno;
6186{
6187	struct indirdep *indirdep;
6188	struct indirdep *indirn;
6189	struct freework *freework;
6190	struct newblk *newblk;
6191	struct mount *mp;
6192	struct buf *bp;
6193	uint8_t *start;
6194	uint8_t *end;
6195	ufs_lbn_t lbnadd;
6196	int level;
6197	int error;
6198	int off;
6199
6200
6201	freework = NULL;
6202	if (blkno == 0)
6203		return (0);
6204	mp = freeblks->fb_list.wk_mp;
6205	bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
6206	if ((bp->b_flags & B_CACHE) == 0) {
6207		bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
6208		bp->b_iocmd = BIO_READ;
6209		bp->b_flags &= ~B_INVAL;
6210		bp->b_ioflags &= ~BIO_ERROR;
6211		vfs_busy_pages(bp, 0);
6212		bp->b_iooffset = dbtob(bp->b_blkno);
6213		bstrategy(bp);
6214		curthread->td_ru.ru_inblock++;
6215		error = bufwait(bp);
6216		if (error) {
6217			brelse(bp);
6218			return (error);
6219		}
6220	}
6221	level = lbn_level(lbn);
6222	lbnadd = lbn_offset(ip->i_fs, level);
6223	/*
6224	 * Compute the offset of the last block we want to keep.  Store
6225	 * in the freework the first block we want to completely free.
6226	 */
6227	off = (lastlbn - -(lbn + level)) / lbnadd;
6228	if (off + 1 == NINDIR(ip->i_fs))
6229		goto nowork;
6230	freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
6231	    0);
6232	/*
6233	 * Link the freework into the indirdep.  This will prevent any new
6234	 * allocations from proceeding until we are finished with the
6235	 * truncate and the block is written.
6236	 */
6237	ACQUIRE_LOCK(ip->i_ump);
6238	indirdep = indirdep_lookup(mp, ip, bp);
6239	if (indirdep->ir_freeblks)
6240		panic("setup_trunc_indir: indirdep already truncated.");
6241	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6242	freework->fw_indir = indirdep;
6243	/*
6244	 * Cancel any allocindirs that will not make it to disk.
6245	 * We have to do this for all copies of the indirdep that
6246	 * live on this newblk.
6247	 */
6248	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6249		newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
6250		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6251			trunc_indirdep(indirn, freeblks, bp, off);
6252	} else
6253		trunc_indirdep(indirdep, freeblks, bp, off);
6254	FREE_LOCK(ip->i_ump);
6255	/*
6256	 * Creation is protected by the buf lock. The saveddata is only
6257	 * needed if a full truncation follows a partial truncation but it
6258	 * is difficult to allocate in that case so we fetch it anyway.
6259	 */
6260	if (indirdep->ir_saveddata == NULL)
6261		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6262		    M_SOFTDEP_FLAGS);
6263nowork:
6264	/* Fetch the blkno of the child and the zero start offset. */
6265	if (ip->i_ump->um_fstype == UFS1) {
6266		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6267		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6268	} else {
6269		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6270		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6271	}
6272	if (freework) {
6273		/* Zero the truncated pointers. */
6274		end = bp->b_data + bp->b_bcount;
6275		bzero(start, end - start);
6276		bdwrite(bp);
6277	} else
6278		bqrelse(bp);
6279	if (level == 0)
6280		return (0);
6281	lbn++; /* adjust level */
6282	lbn -= (off * lbnadd);
6283	return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
6284}
6285
6286/*
6287 * Complete the partial truncation of an indirect block setup by
6288 * setup_trunc_indir().  This zeros the truncated pointers in the saved
6289 * copy and writes them to disk before the freeblks is allowed to complete.
6290 */
6291static void
6292complete_trunc_indir(freework)
6293	struct freework *freework;
6294{
6295	struct freework *fwn;
6296	struct indirdep *indirdep;
6297	struct ufsmount *ump;
6298	struct buf *bp;
6299	uintptr_t start;
6300	int count;
6301
6302	ump = VFSTOUFS(freework->fw_list.wk_mp);
6303	LOCK_OWNED(ump);
6304	indirdep = freework->fw_indir;
6305	for (;;) {
6306		bp = indirdep->ir_bp;
6307		/* See if the block was discarded. */
6308		if (bp == NULL)
6309			break;
6310		/* Inline part of getdirtybuf().  We dont want bremfree. */
6311		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6312			break;
6313		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6314		    LOCK_PTR(ump)) == 0)
6315			BUF_UNLOCK(bp);
6316		ACQUIRE_LOCK(ump);
6317	}
6318	freework->fw_state |= DEPCOMPLETE;
6319	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6320	/*
6321	 * Zero the pointers in the saved copy.
6322	 */
6323	if (indirdep->ir_state & UFS1FMT)
6324		start = sizeof(ufs1_daddr_t);
6325	else
6326		start = sizeof(ufs2_daddr_t);
6327	start *= freework->fw_start;
6328	count = indirdep->ir_savebp->b_bcount - start;
6329	start += (uintptr_t)indirdep->ir_savebp->b_data;
6330	bzero((char *)start, count);
6331	/*
6332	 * We need to start the next truncation in the list if it has not
6333	 * been started yet.
6334	 */
6335	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6336	if (fwn != NULL) {
6337		if (fwn->fw_freeblks == indirdep->ir_freeblks)
6338			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6339		if ((fwn->fw_state & ONWORKLIST) == 0)
6340			freework_enqueue(fwn);
6341	}
6342	/*
6343	 * If bp is NULL the block was fully truncated, restore
6344	 * the saved block list otherwise free it if it is no
6345	 * longer needed.
6346	 */
6347	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6348		if (bp == NULL)
6349			bcopy(indirdep->ir_saveddata,
6350			    indirdep->ir_savebp->b_data,
6351			    indirdep->ir_savebp->b_bcount);
6352		free(indirdep->ir_saveddata, M_INDIRDEP);
6353		indirdep->ir_saveddata = NULL;
6354	}
6355	/*
6356	 * When bp is NULL there is a full truncation pending.  We
6357	 * must wait for this full truncation to be journaled before
6358	 * we can release this freework because the disk pointers will
6359	 * never be written as zero.
6360	 */
6361	if (bp == NULL)  {
6362		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6363			handle_written_freework(freework);
6364		else
6365			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6366			   &freework->fw_list);
6367	} else {
6368		/* Complete when the real copy is written. */
6369		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6370		BUF_UNLOCK(bp);
6371	}
6372}
6373
6374/*
6375 * Calculate the number of blocks we are going to release where datablocks
6376 * is the current total and length is the new file size.
6377 */
6378static ufs2_daddr_t
6379blkcount(fs, datablocks, length)
6380	struct fs *fs;
6381	ufs2_daddr_t datablocks;
6382	off_t length;
6383{
6384	off_t totblks, numblks;
6385
6386	totblks = 0;
6387	numblks = howmany(length, fs->fs_bsize);
6388	if (numblks <= NDADDR) {
6389		totblks = howmany(length, fs->fs_fsize);
6390		goto out;
6391	}
6392        totblks = blkstofrags(fs, numblks);
6393	numblks -= NDADDR;
6394	/*
6395	 * Count all single, then double, then triple indirects required.
6396	 * Subtracting one indirects worth of blocks for each pass
6397	 * acknowledges one of each pointed to by the inode.
6398	 */
6399	for (;;) {
6400		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6401		numblks -= NINDIR(fs);
6402		if (numblks <= 0)
6403			break;
6404		numblks = howmany(numblks, NINDIR(fs));
6405	}
6406out:
6407	totblks = fsbtodb(fs, totblks);
6408	/*
6409	 * Handle sparse files.  We can't reclaim more blocks than the inode
6410	 * references.  We will correct it later in handle_complete_freeblks()
6411	 * when we know the real count.
6412	 */
6413	if (totblks > datablocks)
6414		return (0);
6415	return (datablocks - totblks);
6416}
6417
6418/*
6419 * Handle freeblocks for journaled softupdate filesystems.
6420 *
6421 * Contrary to normal softupdates, we must preserve the block pointers in
6422 * indirects until their subordinates are free.  This is to avoid journaling
6423 * every block that is freed which may consume more space than the journal
6424 * itself.  The recovery program will see the free block journals at the
6425 * base of the truncated area and traverse them to reclaim space.  The
6426 * pointers in the inode may be cleared immediately after the journal
6427 * records are written because each direct and indirect pointer in the
6428 * inode is recorded in a journal.  This permits full truncation to proceed
6429 * asynchronously.  The write order is journal -> inode -> cgs -> indirects.
6430 *
6431 * The algorithm is as follows:
6432 * 1) Traverse the in-memory state and create journal entries to release
6433 *    the relevant blocks and full indirect trees.
6434 * 2) Traverse the indirect block chain adding partial truncation freework
6435 *    records to indirects in the path to lastlbn.  The freework will
6436 *    prevent new allocation dependencies from being satisfied in this
6437 *    indirect until the truncation completes.
6438 * 3) Read and lock the inode block, performing an update with the new size
6439 *    and pointers.  This prevents truncated data from becoming valid on
6440 *    disk through step 4.
6441 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6442 *    eliminate journal work for those records that do not require it.
6443 * 5) Schedule the journal records to be written followed by the inode block.
6444 * 6) Allocate any necessary frags for the end of file.
6445 * 7) Zero any partially truncated blocks.
6446 *
6447 * From this truncation proceeds asynchronously using the freework and
6448 * indir_trunc machinery.  The file will not be extended again into a
6449 * partially truncated indirect block until all work is completed but
6450 * the normal dependency mechanism ensures that it is rolled back/forward
6451 * as appropriate.  Further truncation may occur without delay and is
6452 * serialized in indir_trunc().
6453 */
6454void
6455softdep_journal_freeblocks(ip, cred, length, flags)
6456	struct inode *ip;	/* The inode whose length is to be reduced */
6457	struct ucred *cred;
6458	off_t length;		/* The new length for the file */
6459	int flags;		/* IO_EXT and/or IO_NORMAL */
6460{
6461	struct freeblks *freeblks, *fbn;
6462	struct worklist *wk, *wkn;
6463	struct inodedep *inodedep;
6464	struct jblkdep *jblkdep;
6465	struct allocdirect *adp, *adpn;
6466	struct ufsmount *ump;
6467	struct fs *fs;
6468	struct buf *bp;
6469	struct vnode *vp;
6470	struct mount *mp;
6471	ufs2_daddr_t extblocks, datablocks;
6472	ufs_lbn_t tmpval, lbn, lastlbn;
6473	int frags, lastoff, iboff, allocblock, needj, dflags, error, i;
6474
6475	fs = ip->i_fs;
6476	ump = ip->i_ump;
6477	mp = UFSTOVFS(ump);
6478	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6479	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
6480	vp = ITOV(ip);
6481	needj = 1;
6482	iboff = -1;
6483	allocblock = 0;
6484	extblocks = 0;
6485	datablocks = 0;
6486	frags = 0;
6487	freeblks = newfreeblks(mp, ip);
6488	ACQUIRE_LOCK(ump);
6489	/*
6490	 * If we're truncating a removed file that will never be written
6491	 * we don't need to journal the block frees.  The canceled journals
6492	 * for the allocations will suffice.
6493	 */
6494	dflags = DEPALLOC;
6495	if (IS_SNAPSHOT(ip))
6496		dflags |= NODELAY;
6497	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6498	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6499	    length == 0)
6500		needj = 0;
6501	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6502	    ip->i_number, length, needj);
6503	FREE_LOCK(ump);
6504	/*
6505	 * Calculate the lbn that we are truncating to.  This results in -1
6506	 * if we're truncating the 0 bytes.  So it is the last lbn we want
6507	 * to keep, not the first lbn we want to truncate.
6508	 */
6509	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6510	lastoff = blkoff(fs, length);
6511	/*
6512	 * Compute frags we are keeping in lastlbn.  0 means all.
6513	 */
6514	if (lastlbn >= 0 && lastlbn < NDADDR) {
6515		frags = fragroundup(fs, lastoff);
6516		/* adp offset of last valid allocdirect. */
6517		iboff = lastlbn;
6518	} else if (lastlbn > 0)
6519		iboff = NDADDR;
6520	if (fs->fs_magic == FS_UFS2_MAGIC)
6521		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6522	/*
6523	 * Handle normal data blocks and indirects.  This section saves
6524	 * values used after the inode update to complete frag and indirect
6525	 * truncation.
6526	 */
6527	if ((flags & IO_NORMAL) != 0) {
6528		/*
6529		 * Handle truncation of whole direct and indirect blocks.
6530		 */
6531		for (i = iboff + 1; i < NDADDR; i++)
6532			setup_freedirect(freeblks, ip, i, needj);
6533		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6534		    i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6535			/* Release a whole indirect tree. */
6536			if (lbn > lastlbn) {
6537				setup_freeindir(freeblks, ip, i, -lbn -i,
6538				    needj);
6539				continue;
6540			}
6541			iboff = i + NDADDR;
6542			/*
6543			 * Traverse partially truncated indirect tree.
6544			 */
6545			if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6546				setup_trunc_indir(freeblks, ip, -lbn - i,
6547				    lastlbn, DIP(ip, i_ib[i]));
6548		}
6549		/*
6550		 * Handle partial truncation to a frag boundary.
6551		 */
6552		if (frags) {
6553			ufs2_daddr_t blkno;
6554			long oldfrags;
6555
6556			oldfrags = blksize(fs, ip, lastlbn);
6557			blkno = DIP(ip, i_db[lastlbn]);
6558			if (blkno && oldfrags != frags) {
6559				oldfrags -= frags;
6560				oldfrags = numfrags(ip->i_fs, oldfrags);
6561				blkno += numfrags(ip->i_fs, frags);
6562				newfreework(ump, freeblks, NULL, lastlbn,
6563				    blkno, oldfrags, 0, needj);
6564				if (needj)
6565					adjust_newfreework(freeblks,
6566					    numfrags(ip->i_fs, frags));
6567			} else if (blkno == 0)
6568				allocblock = 1;
6569		}
6570		/*
6571		 * Add a journal record for partial truncate if we are
6572		 * handling indirect blocks.  Non-indirects need no extra
6573		 * journaling.
6574		 */
6575		if (length != 0 && lastlbn >= NDADDR) {
6576			ip->i_flag |= IN_TRUNCATED;
6577			newjtrunc(freeblks, length, 0);
6578		}
6579		ip->i_size = length;
6580		DIP_SET(ip, i_size, ip->i_size);
6581		datablocks = DIP(ip, i_blocks) - extblocks;
6582		if (length != 0)
6583			datablocks = blkcount(ip->i_fs, datablocks, length);
6584		freeblks->fb_len = length;
6585	}
6586	if ((flags & IO_EXT) != 0) {
6587		for (i = 0; i < NXADDR; i++)
6588			setup_freeext(freeblks, ip, i, needj);
6589		ip->i_din2->di_extsize = 0;
6590		datablocks += extblocks;
6591	}
6592#ifdef QUOTA
6593	/* Reference the quotas in case the block count is wrong in the end. */
6594	quotaref(vp, freeblks->fb_quota);
6595	(void) chkdq(ip, -datablocks, NOCRED, 0);
6596#endif
6597	freeblks->fb_chkcnt = -datablocks;
6598	UFS_LOCK(ump);
6599	fs->fs_pendingblocks += datablocks;
6600	UFS_UNLOCK(ump);
6601	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6602	/*
6603	 * Handle truncation of incomplete alloc direct dependencies.  We
6604	 * hold the inode block locked to prevent incomplete dependencies
6605	 * from reaching the disk while we are eliminating those that
6606	 * have been truncated.  This is a partially inlined ffs_update().
6607	 */
6608	ufs_itimes(vp);
6609	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6610	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6611	    (int)fs->fs_bsize, cred, &bp);
6612	if (error) {
6613		brelse(bp);
6614		softdep_error("softdep_journal_freeblocks", error);
6615		return;
6616	}
6617	if (bp->b_bufsize == fs->fs_bsize)
6618		bp->b_flags |= B_CLUSTEROK;
6619	softdep_update_inodeblock(ip, bp, 0);
6620	if (ump->um_fstype == UFS1)
6621		*((struct ufs1_dinode *)bp->b_data +
6622		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6623	else
6624		*((struct ufs2_dinode *)bp->b_data +
6625		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6626	ACQUIRE_LOCK(ump);
6627	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6628	if ((inodedep->id_state & IOSTARTED) != 0)
6629		panic("softdep_setup_freeblocks: inode busy");
6630	/*
6631	 * Add the freeblks structure to the list of operations that
6632	 * must await the zero'ed inode being written to disk. If we
6633	 * still have a bitmap dependency (needj), then the inode
6634	 * has never been written to disk, so we can process the
6635	 * freeblks below once we have deleted the dependencies.
6636	 */
6637	if (needj)
6638		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6639	else
6640		freeblks->fb_state |= COMPLETE;
6641	if ((flags & IO_NORMAL) != 0) {
6642		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6643			if (adp->ad_offset > iboff)
6644				cancel_allocdirect(&inodedep->id_inoupdt, adp,
6645				    freeblks);
6646			/*
6647			 * Truncate the allocdirect.  We could eliminate
6648			 * or modify journal records as well.
6649			 */
6650			else if (adp->ad_offset == iboff && frags)
6651				adp->ad_newsize = frags;
6652		}
6653	}
6654	if ((flags & IO_EXT) != 0)
6655		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6656			cancel_allocdirect(&inodedep->id_extupdt, adp,
6657			    freeblks);
6658	/*
6659	 * Scan the bufwait list for newblock dependencies that will never
6660	 * make it to disk.
6661	 */
6662	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6663		if (wk->wk_type != D_ALLOCDIRECT)
6664			continue;
6665		adp = WK_ALLOCDIRECT(wk);
6666		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6667		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6668			cancel_jfreeblk(freeblks, adp->ad_newblkno);
6669			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6670			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6671		}
6672	}
6673	/*
6674	 * Add journal work.
6675	 */
6676	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6677		add_to_journal(&jblkdep->jb_list);
6678	FREE_LOCK(ump);
6679	bdwrite(bp);
6680	/*
6681	 * Truncate dependency structures beyond length.
6682	 */
6683	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6684	/*
6685	 * This is only set when we need to allocate a fragment because
6686	 * none existed at the end of a frag-sized file.  It handles only
6687	 * allocating a new, zero filled block.
6688	 */
6689	if (allocblock) {
6690		ip->i_size = length - lastoff;
6691		DIP_SET(ip, i_size, ip->i_size);
6692		error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6693		if (error != 0) {
6694			softdep_error("softdep_journal_freeblks", error);
6695			return;
6696		}
6697		ip->i_size = length;
6698		DIP_SET(ip, i_size, length);
6699		ip->i_flag |= IN_CHANGE | IN_UPDATE;
6700		allocbuf(bp, frags);
6701		ffs_update(vp, 0);
6702		bawrite(bp);
6703	} else if (lastoff != 0 && vp->v_type != VDIR) {
6704		int size;
6705
6706		/*
6707		 * Zero the end of a truncated frag or block.
6708		 */
6709		size = sblksize(fs, length, lastlbn);
6710		error = bread(vp, lastlbn, size, cred, &bp);
6711		if (error) {
6712			softdep_error("softdep_journal_freeblks", error);
6713			return;
6714		}
6715		bzero((char *)bp->b_data + lastoff, size - lastoff);
6716		bawrite(bp);
6717
6718	}
6719	ACQUIRE_LOCK(ump);
6720	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6721	TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
6722	freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
6723	/*
6724	 * We zero earlier truncations so they don't erroneously
6725	 * update i_blocks.
6726	 */
6727	if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
6728		TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
6729			fbn->fb_len = 0;
6730	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
6731	    LIST_EMPTY(&freeblks->fb_jblkdephd))
6732		freeblks->fb_state |= INPROGRESS;
6733	else
6734		freeblks = NULL;
6735	FREE_LOCK(ump);
6736	if (freeblks)
6737		handle_workitem_freeblocks(freeblks, 0);
6738	trunc_pages(ip, length, extblocks, flags);
6739
6740}
6741
6742/*
6743 * Flush a JOP_SYNC to the journal.
6744 */
6745void
6746softdep_journal_fsync(ip)
6747	struct inode *ip;
6748{
6749	struct jfsync *jfsync;
6750
6751	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
6752	    ("softdep_journal_fsync called on non-softdep filesystem"));
6753	if ((ip->i_flag & IN_TRUNCATED) == 0)
6754		return;
6755	ip->i_flag &= ~IN_TRUNCATED;
6756	jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
6757	workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump));
6758	jfsync->jfs_size = ip->i_size;
6759	jfsync->jfs_ino = ip->i_number;
6760	ACQUIRE_LOCK(ip->i_ump);
6761	add_to_journal(&jfsync->jfs_list);
6762	jwait(&jfsync->jfs_list, MNT_WAIT);
6763	FREE_LOCK(ip->i_ump);
6764}
6765
6766/*
6767 * Block de-allocation dependencies.
6768 *
6769 * When blocks are de-allocated, the on-disk pointers must be nullified before
6770 * the blocks are made available for use by other files.  (The true
6771 * requirement is that old pointers must be nullified before new on-disk
6772 * pointers are set.  We chose this slightly more stringent requirement to
6773 * reduce complexity.) Our implementation handles this dependency by updating
6774 * the inode (or indirect block) appropriately but delaying the actual block
6775 * de-allocation (i.e., freemap and free space count manipulation) until
6776 * after the updated versions reach stable storage.  After the disk is
6777 * updated, the blocks can be safely de-allocated whenever it is convenient.
6778 * This implementation handles only the common case of reducing a file's
6779 * length to zero. Other cases are handled by the conventional synchronous
6780 * write approach.
6781 *
6782 * The ffs implementation with which we worked double-checks
6783 * the state of the block pointers and file size as it reduces
6784 * a file's length.  Some of this code is replicated here in our
6785 * soft updates implementation.  The freeblks->fb_chkcnt field is
6786 * used to transfer a part of this information to the procedure
6787 * that eventually de-allocates the blocks.
6788 *
6789 * This routine should be called from the routine that shortens
6790 * a file's length, before the inode's size or block pointers
6791 * are modified. It will save the block pointer information for
6792 * later release and zero the inode so that the calling routine
6793 * can release it.
6794 */
6795void
6796softdep_setup_freeblocks(ip, length, flags)
6797	struct inode *ip;	/* The inode whose length is to be reduced */
6798	off_t length;		/* The new length for the file */
6799	int flags;		/* IO_EXT and/or IO_NORMAL */
6800{
6801	struct ufs1_dinode *dp1;
6802	struct ufs2_dinode *dp2;
6803	struct freeblks *freeblks;
6804	struct inodedep *inodedep;
6805	struct allocdirect *adp;
6806	struct ufsmount *ump;
6807	struct buf *bp;
6808	struct fs *fs;
6809	ufs2_daddr_t extblocks, datablocks;
6810	struct mount *mp;
6811	int i, delay, error, dflags;
6812	ufs_lbn_t tmpval;
6813	ufs_lbn_t lbn;
6814
6815	ump = ip->i_ump;
6816	mp = UFSTOVFS(ump);
6817	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6818	    ("softdep_setup_freeblocks called on non-softdep filesystem"));
6819	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
6820	    ip->i_number, length);
6821	KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
6822	fs = ip->i_fs;
6823	freeblks = newfreeblks(mp, ip);
6824	extblocks = 0;
6825	datablocks = 0;
6826	if (fs->fs_magic == FS_UFS2_MAGIC)
6827		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6828	if ((flags & IO_NORMAL) != 0) {
6829		for (i = 0; i < NDADDR; i++)
6830			setup_freedirect(freeblks, ip, i, 0);
6831		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6832		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
6833			setup_freeindir(freeblks, ip, i, -lbn -i, 0);
6834		ip->i_size = 0;
6835		DIP_SET(ip, i_size, 0);
6836		datablocks = DIP(ip, i_blocks) - extblocks;
6837	}
6838	if ((flags & IO_EXT) != 0) {
6839		for (i = 0; i < NXADDR; i++)
6840			setup_freeext(freeblks, ip, i, 0);
6841		ip->i_din2->di_extsize = 0;
6842		datablocks += extblocks;
6843	}
6844#ifdef QUOTA
6845	/* Reference the quotas in case the block count is wrong in the end. */
6846	quotaref(ITOV(ip), freeblks->fb_quota);
6847	(void) chkdq(ip, -datablocks, NOCRED, 0);
6848#endif
6849	freeblks->fb_chkcnt = -datablocks;
6850	UFS_LOCK(ump);
6851	fs->fs_pendingblocks += datablocks;
6852	UFS_UNLOCK(ump);
6853	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6854	/*
6855	 * Push the zero'ed inode to to its disk buffer so that we are free
6856	 * to delete its dependencies below. Once the dependencies are gone
6857	 * the buffer can be safely released.
6858	 */
6859	if ((error = bread(ip->i_devvp,
6860	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6861	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
6862		brelse(bp);
6863		softdep_error("softdep_setup_freeblocks", error);
6864	}
6865	if (ump->um_fstype == UFS1) {
6866		dp1 = ((struct ufs1_dinode *)bp->b_data +
6867		    ino_to_fsbo(fs, ip->i_number));
6868		ip->i_din1->di_freelink = dp1->di_freelink;
6869		*dp1 = *ip->i_din1;
6870	} else {
6871		dp2 = ((struct ufs2_dinode *)bp->b_data +
6872		    ino_to_fsbo(fs, ip->i_number));
6873		ip->i_din2->di_freelink = dp2->di_freelink;
6874		*dp2 = *ip->i_din2;
6875	}
6876	/*
6877	 * Find and eliminate any inode dependencies.
6878	 */
6879	ACQUIRE_LOCK(ump);
6880	dflags = DEPALLOC;
6881	if (IS_SNAPSHOT(ip))
6882		dflags |= NODELAY;
6883	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6884	if ((inodedep->id_state & IOSTARTED) != 0)
6885		panic("softdep_setup_freeblocks: inode busy");
6886	/*
6887	 * Add the freeblks structure to the list of operations that
6888	 * must await the zero'ed inode being written to disk. If we
6889	 * still have a bitmap dependency (delay == 0), then the inode
6890	 * has never been written to disk, so we can process the
6891	 * freeblks below once we have deleted the dependencies.
6892	 */
6893	delay = (inodedep->id_state & DEPCOMPLETE);
6894	if (delay)
6895		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6896	else
6897		freeblks->fb_state |= COMPLETE;
6898	/*
6899	 * Because the file length has been truncated to zero, any
6900	 * pending block allocation dependency structures associated
6901	 * with this inode are obsolete and can simply be de-allocated.
6902	 * We must first merge the two dependency lists to get rid of
6903	 * any duplicate freefrag structures, then purge the merged list.
6904	 * If we still have a bitmap dependency, then the inode has never
6905	 * been written to disk, so we can free any fragments without delay.
6906	 */
6907	if (flags & IO_NORMAL) {
6908		merge_inode_lists(&inodedep->id_newinoupdt,
6909		    &inodedep->id_inoupdt);
6910		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
6911			cancel_allocdirect(&inodedep->id_inoupdt, adp,
6912			    freeblks);
6913	}
6914	if (flags & IO_EXT) {
6915		merge_inode_lists(&inodedep->id_newextupdt,
6916		    &inodedep->id_extupdt);
6917		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6918			cancel_allocdirect(&inodedep->id_extupdt, adp,
6919			    freeblks);
6920	}
6921	FREE_LOCK(ump);
6922	bdwrite(bp);
6923	trunc_dependencies(ip, freeblks, -1, 0, flags);
6924	ACQUIRE_LOCK(ump);
6925	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
6926		(void) free_inodedep(inodedep);
6927	freeblks->fb_state |= DEPCOMPLETE;
6928	/*
6929	 * If the inode with zeroed block pointers is now on disk
6930	 * we can start freeing blocks.
6931	 */
6932	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
6933		freeblks->fb_state |= INPROGRESS;
6934	else
6935		freeblks = NULL;
6936	FREE_LOCK(ump);
6937	if (freeblks)
6938		handle_workitem_freeblocks(freeblks, 0);
6939	trunc_pages(ip, length, extblocks, flags);
6940}
6941
6942/*
6943 * Eliminate pages from the page cache that back parts of this inode and
6944 * adjust the vnode pager's idea of our size.  This prevents stale data
6945 * from hanging around in the page cache.
6946 */
6947static void
6948trunc_pages(ip, length, extblocks, flags)
6949	struct inode *ip;
6950	off_t length;
6951	ufs2_daddr_t extblocks;
6952	int flags;
6953{
6954	struct vnode *vp;
6955	struct fs *fs;
6956	ufs_lbn_t lbn;
6957	off_t end, extend;
6958
6959	vp = ITOV(ip);
6960	fs = ip->i_fs;
6961	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
6962	if ((flags & IO_EXT) != 0)
6963		vn_pages_remove(vp, extend, 0);
6964	if ((flags & IO_NORMAL) == 0)
6965		return;
6966	BO_LOCK(&vp->v_bufobj);
6967	drain_output(vp);
6968	BO_UNLOCK(&vp->v_bufobj);
6969	/*
6970	 * The vnode pager eliminates file pages we eliminate indirects
6971	 * below.
6972	 */
6973	vnode_pager_setsize(vp, length);
6974	/*
6975	 * Calculate the end based on the last indirect we want to keep.  If
6976	 * the block extends into indirects we can just use the negative of
6977	 * its lbn.  Doubles and triples exist at lower numbers so we must
6978	 * be careful not to remove those, if they exist.  double and triple
6979	 * indirect lbns do not overlap with others so it is not important
6980	 * to verify how many levels are required.
6981	 */
6982	lbn = lblkno(fs, length);
6983	if (lbn >= NDADDR) {
6984		/* Calculate the virtual lbn of the triple indirect. */
6985		lbn = -lbn - (NIADDR - 1);
6986		end = OFF_TO_IDX(lblktosize(fs, lbn));
6987	} else
6988		end = extend;
6989	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
6990}
6991
6992/*
6993 * See if the buf bp is in the range eliminated by truncation.
6994 */
6995static int
6996trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
6997	struct buf *bp;
6998	int *blkoffp;
6999	ufs_lbn_t lastlbn;
7000	int lastoff;
7001	int flags;
7002{
7003	ufs_lbn_t lbn;
7004
7005	*blkoffp = 0;
7006	/* Only match ext/normal blocks as appropriate. */
7007	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
7008	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
7009		return (0);
7010	/* ALTDATA is always a full truncation. */
7011	if ((bp->b_xflags & BX_ALTDATA) != 0)
7012		return (1);
7013	/* -1 is full truncation. */
7014	if (lastlbn == -1)
7015		return (1);
7016	/*
7017	 * If this is a partial truncate we only want those
7018	 * blocks and indirect blocks that cover the range
7019	 * we're after.
7020	 */
7021	lbn = bp->b_lblkno;
7022	if (lbn < 0)
7023		lbn = -(lbn + lbn_level(lbn));
7024	if (lbn < lastlbn)
7025		return (0);
7026	/* Here we only truncate lblkno if it's partial. */
7027	if (lbn == lastlbn) {
7028		if (lastoff == 0)
7029			return (0);
7030		*blkoffp = lastoff;
7031	}
7032	return (1);
7033}
7034
7035/*
7036 * Eliminate any dependencies that exist in memory beyond lblkno:off
7037 */
7038static void
7039trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
7040	struct inode *ip;
7041	struct freeblks *freeblks;
7042	ufs_lbn_t lastlbn;
7043	int lastoff;
7044	int flags;
7045{
7046	struct bufobj *bo;
7047	struct vnode *vp;
7048	struct buf *bp;
7049	struct fs *fs;
7050	int blkoff;
7051
7052	/*
7053	 * We must wait for any I/O in progress to finish so that
7054	 * all potential buffers on the dirty list will be visible.
7055	 * Once they are all there, walk the list and get rid of
7056	 * any dependencies.
7057	 */
7058	fs = ip->i_fs;
7059	vp = ITOV(ip);
7060	bo = &vp->v_bufobj;
7061	BO_LOCK(bo);
7062	drain_output(vp);
7063	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
7064		bp->b_vflags &= ~BV_SCANNED;
7065restart:
7066	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
7067		if (bp->b_vflags & BV_SCANNED)
7068			continue;
7069		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7070			bp->b_vflags |= BV_SCANNED;
7071			continue;
7072		}
7073		KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer"));
7074		if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
7075			goto restart;
7076		BO_UNLOCK(bo);
7077		if (deallocate_dependencies(bp, freeblks, blkoff))
7078			bqrelse(bp);
7079		else
7080			brelse(bp);
7081		BO_LOCK(bo);
7082		goto restart;
7083	}
7084	/*
7085	 * Now do the work of vtruncbuf while also matching indirect blocks.
7086	 */
7087	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
7088		bp->b_vflags &= ~BV_SCANNED;
7089cleanrestart:
7090	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
7091		if (bp->b_vflags & BV_SCANNED)
7092			continue;
7093		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7094			bp->b_vflags |= BV_SCANNED;
7095			continue;
7096		}
7097		if (BUF_LOCK(bp,
7098		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
7099		    BO_LOCKPTR(bo)) == ENOLCK) {
7100			BO_LOCK(bo);
7101			goto cleanrestart;
7102		}
7103		bp->b_vflags |= BV_SCANNED;
7104		bremfree(bp);
7105		if (blkoff != 0) {
7106			allocbuf(bp, blkoff);
7107			bqrelse(bp);
7108		} else {
7109			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
7110			brelse(bp);
7111		}
7112		BO_LOCK(bo);
7113		goto cleanrestart;
7114	}
7115	drain_output(vp);
7116	BO_UNLOCK(bo);
7117}
7118
7119static int
7120cancel_pagedep(pagedep, freeblks, blkoff)
7121	struct pagedep *pagedep;
7122	struct freeblks *freeblks;
7123	int blkoff;
7124{
7125	struct jremref *jremref;
7126	struct jmvref *jmvref;
7127	struct dirrem *dirrem, *tmp;
7128	int i;
7129
7130	/*
7131	 * Copy any directory remove dependencies to the list
7132	 * to be processed after the freeblks proceeds.  If
7133	 * directory entry never made it to disk they
7134	 * can be dumped directly onto the work list.
7135	 */
7136	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
7137		/* Skip this directory removal if it is intended to remain. */
7138		if (dirrem->dm_offset < blkoff)
7139			continue;
7140		/*
7141		 * If there are any dirrems we wait for the journal write
7142		 * to complete and then restart the buf scan as the lock
7143		 * has been dropped.
7144		 */
7145		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
7146			jwait(&jremref->jr_list, MNT_WAIT);
7147			return (ERESTART);
7148		}
7149		LIST_REMOVE(dirrem, dm_next);
7150		dirrem->dm_dirinum = pagedep->pd_ino;
7151		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
7152	}
7153	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
7154		jwait(&jmvref->jm_list, MNT_WAIT);
7155		return (ERESTART);
7156	}
7157	/*
7158	 * When we're partially truncating a pagedep we just want to flush
7159	 * journal entries and return.  There can not be any adds in the
7160	 * truncated portion of the directory and newblk must remain if
7161	 * part of the block remains.
7162	 */
7163	if (blkoff != 0) {
7164		struct diradd *dap;
7165
7166		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7167			if (dap->da_offset > blkoff)
7168				panic("cancel_pagedep: diradd %p off %d > %d",
7169				    dap, dap->da_offset, blkoff);
7170		for (i = 0; i < DAHASHSZ; i++)
7171			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7172				if (dap->da_offset > blkoff)
7173					panic("cancel_pagedep: diradd %p off %d > %d",
7174					    dap, dap->da_offset, blkoff);
7175		return (0);
7176	}
7177	/*
7178	 * There should be no directory add dependencies present
7179	 * as the directory could not be truncated until all
7180	 * children were removed.
7181	 */
7182	KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
7183	    ("deallocate_dependencies: pendinghd != NULL"));
7184	for (i = 0; i < DAHASHSZ; i++)
7185		KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7186		    ("deallocate_dependencies: diraddhd != NULL"));
7187	if ((pagedep->pd_state & NEWBLOCK) != 0)
7188		free_newdirblk(pagedep->pd_newdirblk);
7189	if (free_pagedep(pagedep) == 0)
7190		panic("Failed to free pagedep %p", pagedep);
7191	return (0);
7192}
7193
7194/*
7195 * Reclaim any dependency structures from a buffer that is about to
7196 * be reallocated to a new vnode. The buffer must be locked, thus,
7197 * no I/O completion operations can occur while we are manipulating
7198 * its associated dependencies. The mutex is held so that other I/O's
7199 * associated with related dependencies do not occur.
7200 */
7201static int
7202deallocate_dependencies(bp, freeblks, off)
7203	struct buf *bp;
7204	struct freeblks *freeblks;
7205	int off;
7206{
7207	struct indirdep *indirdep;
7208	struct pagedep *pagedep;
7209	struct allocdirect *adp;
7210	struct worklist *wk, *wkn;
7211	struct ufsmount *ump;
7212
7213	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
7214		goto done;
7215	ump = VFSTOUFS(wk->wk_mp);
7216	ACQUIRE_LOCK(ump);
7217	LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7218		switch (wk->wk_type) {
7219		case D_INDIRDEP:
7220			indirdep = WK_INDIRDEP(wk);
7221			if (bp->b_lblkno >= 0 ||
7222			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7223				panic("deallocate_dependencies: not indir");
7224			cancel_indirdep(indirdep, bp, freeblks);
7225			continue;
7226
7227		case D_PAGEDEP:
7228			pagedep = WK_PAGEDEP(wk);
7229			if (cancel_pagedep(pagedep, freeblks, off)) {
7230				FREE_LOCK(ump);
7231				return (ERESTART);
7232			}
7233			continue;
7234
7235		case D_ALLOCINDIR:
7236			/*
7237			 * Simply remove the allocindir, we'll find it via
7238			 * the indirdep where we can clear pointers if
7239			 * needed.
7240			 */
7241			WORKLIST_REMOVE(wk);
7242			continue;
7243
7244		case D_FREEWORK:
7245			/*
7246			 * A truncation is waiting for the zero'd pointers
7247			 * to be written.  It can be freed when the freeblks
7248			 * is journaled.
7249			 */
7250			WORKLIST_REMOVE(wk);
7251			wk->wk_state |= ONDEPLIST;
7252			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7253			break;
7254
7255		case D_ALLOCDIRECT:
7256			adp = WK_ALLOCDIRECT(wk);
7257			if (off != 0)
7258				continue;
7259			/* FALLTHROUGH */
7260		default:
7261			panic("deallocate_dependencies: Unexpected type %s",
7262			    TYPENAME(wk->wk_type));
7263			/* NOTREACHED */
7264		}
7265	}
7266	FREE_LOCK(ump);
7267done:
7268	/*
7269	 * Don't throw away this buf, we were partially truncating and
7270	 * some deps may always remain.
7271	 */
7272	if (off) {
7273		allocbuf(bp, off);
7274		bp->b_vflags |= BV_SCANNED;
7275		return (EBUSY);
7276	}
7277	bp->b_flags |= B_INVAL | B_NOCACHE;
7278
7279	return (0);
7280}
7281
7282/*
7283 * An allocdirect is being canceled due to a truncate.  We must make sure
7284 * the journal entry is released in concert with the blkfree that releases
7285 * the storage.  Completed journal entries must not be released until the
7286 * space is no longer pointed to by the inode or in the bitmap.
7287 */
7288static void
7289cancel_allocdirect(adphead, adp, freeblks)
7290	struct allocdirectlst *adphead;
7291	struct allocdirect *adp;
7292	struct freeblks *freeblks;
7293{
7294	struct freework *freework;
7295	struct newblk *newblk;
7296	struct worklist *wk;
7297
7298	TAILQ_REMOVE(adphead, adp, ad_next);
7299	newblk = (struct newblk *)adp;
7300	freework = NULL;
7301	/*
7302	 * Find the correct freework structure.
7303	 */
7304	LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7305		if (wk->wk_type != D_FREEWORK)
7306			continue;
7307		freework = WK_FREEWORK(wk);
7308		if (freework->fw_blkno == newblk->nb_newblkno)
7309			break;
7310	}
7311	if (freework == NULL)
7312		panic("cancel_allocdirect: Freework not found");
7313	/*
7314	 * If a newblk exists at all we still have the journal entry that
7315	 * initiated the allocation so we do not need to journal the free.
7316	 */
7317	cancel_jfreeblk(freeblks, freework->fw_blkno);
7318	/*
7319	 * If the journal hasn't been written the jnewblk must be passed
7320	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
7321	 * this by linking the journal dependency into the freework to be
7322	 * freed when freework_freeblock() is called.  If the journal has
7323	 * been written we can simply reclaim the journal space when the
7324	 * freeblks work is complete.
7325	 */
7326	freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7327	    &freeblks->fb_jwork);
7328	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7329}
7330
7331
7332/*
7333 * Cancel a new block allocation.  May be an indirect or direct block.  We
7334 * remove it from various lists and return any journal record that needs to
7335 * be resolved by the caller.
7336 *
7337 * A special consideration is made for indirects which were never pointed
7338 * at on disk and will never be found once this block is released.
7339 */
7340static struct jnewblk *
7341cancel_newblk(newblk, wk, wkhd)
7342	struct newblk *newblk;
7343	struct worklist *wk;
7344	struct workhead *wkhd;
7345{
7346	struct jnewblk *jnewblk;
7347
7348	CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7349
7350	newblk->nb_state |= GOINGAWAY;
7351	/*
7352	 * Previously we traversed the completedhd on each indirdep
7353	 * attached to this newblk to cancel them and gather journal
7354	 * work.  Since we need only the oldest journal segment and
7355	 * the lowest point on the tree will always have the oldest
7356	 * journal segment we are free to release the segments
7357	 * of any subordinates and may leave the indirdep list to
7358	 * indirdep_complete() when this newblk is freed.
7359	 */
7360	if (newblk->nb_state & ONDEPLIST) {
7361		newblk->nb_state &= ~ONDEPLIST;
7362		LIST_REMOVE(newblk, nb_deps);
7363	}
7364	if (newblk->nb_state & ONWORKLIST)
7365		WORKLIST_REMOVE(&newblk->nb_list);
7366	/*
7367	 * If the journal entry hasn't been written we save a pointer to
7368	 * the dependency that frees it until it is written or the
7369	 * superseding operation completes.
7370	 */
7371	jnewblk = newblk->nb_jnewblk;
7372	if (jnewblk != NULL && wk != NULL) {
7373		newblk->nb_jnewblk = NULL;
7374		jnewblk->jn_dep = wk;
7375	}
7376	if (!LIST_EMPTY(&newblk->nb_jwork))
7377		jwork_move(wkhd, &newblk->nb_jwork);
7378	/*
7379	 * When truncating we must free the newdirblk early to remove
7380	 * the pagedep from the hash before returning.
7381	 */
7382	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7383		free_newdirblk(WK_NEWDIRBLK(wk));
7384	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7385		panic("cancel_newblk: extra newdirblk");
7386
7387	return (jnewblk);
7388}
7389
7390/*
7391 * Schedule the freefrag associated with a newblk to be released once
7392 * the pointers are written and the previous block is no longer needed.
7393 */
7394static void
7395newblk_freefrag(newblk)
7396	struct newblk *newblk;
7397{
7398	struct freefrag *freefrag;
7399
7400	if (newblk->nb_freefrag == NULL)
7401		return;
7402	freefrag = newblk->nb_freefrag;
7403	newblk->nb_freefrag = NULL;
7404	freefrag->ff_state |= COMPLETE;
7405	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7406		add_to_worklist(&freefrag->ff_list, 0);
7407}
7408
7409/*
7410 * Free a newblk. Generate a new freefrag work request if appropriate.
7411 * This must be called after the inode pointer and any direct block pointers
7412 * are valid or fully removed via truncate or frag extension.
7413 */
7414static void
7415free_newblk(newblk)
7416	struct newblk *newblk;
7417{
7418	struct indirdep *indirdep;
7419	struct worklist *wk;
7420
7421	KASSERT(newblk->nb_jnewblk == NULL,
7422	    ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7423	KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7424	    ("free_newblk: unclaimed newblk"));
7425	LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp));
7426	newblk_freefrag(newblk);
7427	if (newblk->nb_state & ONDEPLIST)
7428		LIST_REMOVE(newblk, nb_deps);
7429	if (newblk->nb_state & ONWORKLIST)
7430		WORKLIST_REMOVE(&newblk->nb_list);
7431	LIST_REMOVE(newblk, nb_hash);
7432	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7433		free_newdirblk(WK_NEWDIRBLK(wk));
7434	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7435		panic("free_newblk: extra newdirblk");
7436	while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7437		indirdep_complete(indirdep);
7438	handle_jwork(&newblk->nb_jwork);
7439	WORKITEM_FREE(newblk, D_NEWBLK);
7440}
7441
7442/*
7443 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7444 * This routine must be called with splbio interrupts blocked.
7445 */
7446static void
7447free_newdirblk(newdirblk)
7448	struct newdirblk *newdirblk;
7449{
7450	struct pagedep *pagedep;
7451	struct diradd *dap;
7452	struct worklist *wk;
7453
7454	LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp));
7455	WORKLIST_REMOVE(&newdirblk->db_list);
7456	/*
7457	 * If the pagedep is still linked onto the directory buffer
7458	 * dependency chain, then some of the entries on the
7459	 * pd_pendinghd list may not be committed to disk yet. In
7460	 * this case, we will simply clear the NEWBLOCK flag and
7461	 * let the pd_pendinghd list be processed when the pagedep
7462	 * is next written. If the pagedep is no longer on the buffer
7463	 * dependency chain, then all the entries on the pd_pending
7464	 * list are committed to disk and we can free them here.
7465	 */
7466	pagedep = newdirblk->db_pagedep;
7467	pagedep->pd_state &= ~NEWBLOCK;
7468	if ((pagedep->pd_state & ONWORKLIST) == 0) {
7469		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7470			free_diradd(dap, NULL);
7471		/*
7472		 * If no dependencies remain, the pagedep will be freed.
7473		 */
7474		free_pagedep(pagedep);
7475	}
7476	/* Should only ever be one item in the list. */
7477	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7478		WORKLIST_REMOVE(wk);
7479		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7480	}
7481	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7482}
7483
7484/*
7485 * Prepare an inode to be freed. The actual free operation is not
7486 * done until the zero'ed inode has been written to disk.
7487 */
7488void
7489softdep_freefile(pvp, ino, mode)
7490	struct vnode *pvp;
7491	ino_t ino;
7492	int mode;
7493{
7494	struct inode *ip = VTOI(pvp);
7495	struct inodedep *inodedep;
7496	struct freefile *freefile;
7497	struct freeblks *freeblks;
7498	struct ufsmount *ump;
7499
7500	ump = ip->i_ump;
7501	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7502	    ("softdep_freefile called on non-softdep filesystem"));
7503	/*
7504	 * This sets up the inode de-allocation dependency.
7505	 */
7506	freefile = malloc(sizeof(struct freefile),
7507		M_FREEFILE, M_SOFTDEP_FLAGS);
7508	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7509	freefile->fx_mode = mode;
7510	freefile->fx_oldinum = ino;
7511	freefile->fx_devvp = ip->i_devvp;
7512	LIST_INIT(&freefile->fx_jwork);
7513	UFS_LOCK(ump);
7514	ip->i_fs->fs_pendinginodes += 1;
7515	UFS_UNLOCK(ump);
7516
7517	/*
7518	 * If the inodedep does not exist, then the zero'ed inode has
7519	 * been written to disk. If the allocated inode has never been
7520	 * written to disk, then the on-disk inode is zero'ed. In either
7521	 * case we can free the file immediately.  If the journal was
7522	 * canceled before being written the inode will never make it to
7523	 * disk and we must send the canceled journal entrys to
7524	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7525	 * Any blocks waiting on the inode to write can be safely freed
7526	 * here as it will never been written.
7527	 */
7528	ACQUIRE_LOCK(ump);
7529	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7530	if (inodedep) {
7531		/*
7532		 * Clear out freeblks that no longer need to reference
7533		 * this inode.
7534		 */
7535		while ((freeblks =
7536		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7537			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7538			    fb_next);
7539			freeblks->fb_state &= ~ONDEPLIST;
7540		}
7541		/*
7542		 * Remove this inode from the unlinked list.
7543		 */
7544		if (inodedep->id_state & UNLINKED) {
7545			/*
7546			 * Save the journal work to be freed with the bitmap
7547			 * before we clear UNLINKED.  Otherwise it can be lost
7548			 * if the inode block is written.
7549			 */
7550			handle_bufwait(inodedep, &freefile->fx_jwork);
7551			clear_unlinked_inodedep(inodedep);
7552			/*
7553			 * Re-acquire inodedep as we've dropped the
7554			 * per-filesystem lock in clear_unlinked_inodedep().
7555			 */
7556			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7557		}
7558	}
7559	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7560		FREE_LOCK(ump);
7561		handle_workitem_freefile(freefile);
7562		return;
7563	}
7564	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7565		inodedep->id_state |= GOINGAWAY;
7566	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7567	FREE_LOCK(ump);
7568	if (ip->i_number == ino)
7569		ip->i_flag |= IN_MODIFIED;
7570}
7571
7572/*
7573 * Check to see if an inode has never been written to disk. If
7574 * so free the inodedep and return success, otherwise return failure.
7575 * This routine must be called with splbio interrupts blocked.
7576 *
7577 * If we still have a bitmap dependency, then the inode has never
7578 * been written to disk. Drop the dependency as it is no longer
7579 * necessary since the inode is being deallocated. We set the
7580 * ALLCOMPLETE flags since the bitmap now properly shows that the
7581 * inode is not allocated. Even if the inode is actively being
7582 * written, it has been rolled back to its zero'ed state, so we
7583 * are ensured that a zero inode is what is on the disk. For short
7584 * lived files, this change will usually result in removing all the
7585 * dependencies from the inode so that it can be freed immediately.
7586 */
7587static int
7588check_inode_unwritten(inodedep)
7589	struct inodedep *inodedep;
7590{
7591
7592	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7593
7594	if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7595	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7596	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7597	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7598	    !LIST_EMPTY(&inodedep->id_inowait) ||
7599	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7600	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7601	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7602	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7603	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7604	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7605	    inodedep->id_mkdiradd != NULL ||
7606	    inodedep->id_nlinkdelta != 0)
7607		return (0);
7608	/*
7609	 * Another process might be in initiate_write_inodeblock_ufs[12]
7610	 * trying to allocate memory without holding "Softdep Lock".
7611	 */
7612	if ((inodedep->id_state & IOSTARTED) != 0 &&
7613	    inodedep->id_savedino1 == NULL)
7614		return (0);
7615
7616	if (inodedep->id_state & ONDEPLIST)
7617		LIST_REMOVE(inodedep, id_deps);
7618	inodedep->id_state &= ~ONDEPLIST;
7619	inodedep->id_state |= ALLCOMPLETE;
7620	inodedep->id_bmsafemap = NULL;
7621	if (inodedep->id_state & ONWORKLIST)
7622		WORKLIST_REMOVE(&inodedep->id_list);
7623	if (inodedep->id_savedino1 != NULL) {
7624		free(inodedep->id_savedino1, M_SAVEDINO);
7625		inodedep->id_savedino1 = NULL;
7626	}
7627	if (free_inodedep(inodedep) == 0)
7628		panic("check_inode_unwritten: busy inode");
7629	return (1);
7630}
7631
7632/*
7633 * Try to free an inodedep structure. Return 1 if it could be freed.
7634 */
7635static int
7636free_inodedep(inodedep)
7637	struct inodedep *inodedep;
7638{
7639
7640	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7641	if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7642	    (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7643	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7644	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7645	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7646	    !LIST_EMPTY(&inodedep->id_inowait) ||
7647	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7648	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7649	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7650	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7651	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7652	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7653	    inodedep->id_mkdiradd != NULL ||
7654	    inodedep->id_nlinkdelta != 0 ||
7655	    inodedep->id_savedino1 != NULL)
7656		return (0);
7657	if (inodedep->id_state & ONDEPLIST)
7658		LIST_REMOVE(inodedep, id_deps);
7659	LIST_REMOVE(inodedep, id_hash);
7660	WORKITEM_FREE(inodedep, D_INODEDEP);
7661	return (1);
7662}
7663
7664/*
7665 * Free the block referenced by a freework structure.  The parent freeblks
7666 * structure is released and completed when the final cg bitmap reaches
7667 * the disk.  This routine may be freeing a jnewblk which never made it to
7668 * disk in which case we do not have to wait as the operation is undone
7669 * in memory immediately.
7670 */
7671static void
7672freework_freeblock(freework)
7673	struct freework *freework;
7674{
7675	struct freeblks *freeblks;
7676	struct jnewblk *jnewblk;
7677	struct ufsmount *ump;
7678	struct workhead wkhd;
7679	struct fs *fs;
7680	int bsize;
7681	int needj;
7682
7683	ump = VFSTOUFS(freework->fw_list.wk_mp);
7684	LOCK_OWNED(ump);
7685	/*
7686	 * Handle partial truncate separately.
7687	 */
7688	if (freework->fw_indir) {
7689		complete_trunc_indir(freework);
7690		return;
7691	}
7692	freeblks = freework->fw_freeblks;
7693	fs = ump->um_fs;
7694	needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7695	bsize = lfragtosize(fs, freework->fw_frags);
7696	LIST_INIT(&wkhd);
7697	/*
7698	 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7699	 * on the indirblk hashtable and prevents premature freeing.
7700	 */
7701	freework->fw_state |= DEPCOMPLETE;
7702	/*
7703	 * SUJ needs to wait for the segment referencing freed indirect
7704	 * blocks to expire so that we know the checker will not confuse
7705	 * a re-allocated indirect block with its old contents.
7706	 */
7707	if (needj && freework->fw_lbn <= -NDADDR)
7708		indirblk_insert(freework);
7709	/*
7710	 * If we are canceling an existing jnewblk pass it to the free
7711	 * routine, otherwise pass the freeblk which will ultimately
7712	 * release the freeblks.  If we're not journaling, we can just
7713	 * free the freeblks immediately.
7714	 */
7715	jnewblk = freework->fw_jnewblk;
7716	if (jnewblk != NULL) {
7717		cancel_jnewblk(jnewblk, &wkhd);
7718		needj = 0;
7719	} else if (needj) {
7720		freework->fw_state |= DELAYEDFREE;
7721		freeblks->fb_cgwait++;
7722		WORKLIST_INSERT(&wkhd, &freework->fw_list);
7723	}
7724	FREE_LOCK(ump);
7725	freeblks_free(ump, freeblks, btodb(bsize));
7726	CTR4(KTR_SUJ,
7727	    "freework_freeblock: ino %d blkno %jd lbn %jd size %ld",
7728	    freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
7729	ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
7730	    freeblks->fb_inum, freeblks->fb_vtype, &wkhd);
7731	ACQUIRE_LOCK(ump);
7732	/*
7733	 * The jnewblk will be discarded and the bits in the map never
7734	 * made it to disk.  We can immediately free the freeblk.
7735	 */
7736	if (needj == 0)
7737		handle_written_freework(freework);
7738}
7739
7740/*
7741 * We enqueue freework items that need processing back on the freeblks and
7742 * add the freeblks to the worklist.  This makes it easier to find all work
7743 * required to flush a truncation in process_truncates().
7744 */
7745static void
7746freework_enqueue(freework)
7747	struct freework *freework;
7748{
7749	struct freeblks *freeblks;
7750
7751	freeblks = freework->fw_freeblks;
7752	if ((freework->fw_state & INPROGRESS) == 0)
7753		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
7754	if ((freeblks->fb_state &
7755	    (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
7756	    LIST_EMPTY(&freeblks->fb_jblkdephd))
7757		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7758}
7759
7760/*
7761 * Start, continue, or finish the process of freeing an indirect block tree.
7762 * The free operation may be paused at any point with fw_off containing the
7763 * offset to restart from.  This enables us to implement some flow control
7764 * for large truncates which may fan out and generate a huge number of
7765 * dependencies.
7766 */
7767static void
7768handle_workitem_indirblk(freework)
7769	struct freework *freework;
7770{
7771	struct freeblks *freeblks;
7772	struct ufsmount *ump;
7773	struct fs *fs;
7774
7775	freeblks = freework->fw_freeblks;
7776	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7777	fs = ump->um_fs;
7778	if (freework->fw_state & DEPCOMPLETE) {
7779		handle_written_freework(freework);
7780		return;
7781	}
7782	if (freework->fw_off == NINDIR(fs)) {
7783		freework_freeblock(freework);
7784		return;
7785	}
7786	freework->fw_state |= INPROGRESS;
7787	FREE_LOCK(ump);
7788	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
7789	    freework->fw_lbn);
7790	ACQUIRE_LOCK(ump);
7791}
7792
7793/*
7794 * Called when a freework structure attached to a cg buf is written.  The
7795 * ref on either the parent or the freeblks structure is released and
7796 * the freeblks is added back to the worklist if there is more work to do.
7797 */
7798static void
7799handle_written_freework(freework)
7800	struct freework *freework;
7801{
7802	struct freeblks *freeblks;
7803	struct freework *parent;
7804
7805	freeblks = freework->fw_freeblks;
7806	parent = freework->fw_parent;
7807	if (freework->fw_state & DELAYEDFREE)
7808		freeblks->fb_cgwait--;
7809	freework->fw_state |= COMPLETE;
7810	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
7811		WORKITEM_FREE(freework, D_FREEWORK);
7812	if (parent) {
7813		if (--parent->fw_ref == 0)
7814			freework_enqueue(parent);
7815		return;
7816	}
7817	if (--freeblks->fb_ref != 0)
7818		return;
7819	if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
7820	    ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
7821		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7822}
7823
7824/*
7825 * This workitem routine performs the block de-allocation.
7826 * The workitem is added to the pending list after the updated
7827 * inode block has been written to disk.  As mentioned above,
7828 * checks regarding the number of blocks de-allocated (compared
7829 * to the number of blocks allocated for the file) are also
7830 * performed in this function.
7831 */
7832static int
7833handle_workitem_freeblocks(freeblks, flags)
7834	struct freeblks *freeblks;
7835	int flags;
7836{
7837	struct freework *freework;
7838	struct newblk *newblk;
7839	struct allocindir *aip;
7840	struct ufsmount *ump;
7841	struct worklist *wk;
7842
7843	KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
7844	    ("handle_workitem_freeblocks: Journal entries not written."));
7845	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7846	ACQUIRE_LOCK(ump);
7847	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
7848		WORKLIST_REMOVE(wk);
7849		switch (wk->wk_type) {
7850		case D_DIRREM:
7851			wk->wk_state |= COMPLETE;
7852			add_to_worklist(wk, 0);
7853			continue;
7854
7855		case D_ALLOCDIRECT:
7856			free_newblk(WK_NEWBLK(wk));
7857			continue;
7858
7859		case D_ALLOCINDIR:
7860			aip = WK_ALLOCINDIR(wk);
7861			freework = NULL;
7862			if (aip->ai_state & DELAYEDFREE) {
7863				FREE_LOCK(ump);
7864				freework = newfreework(ump, freeblks, NULL,
7865				    aip->ai_lbn, aip->ai_newblkno,
7866				    ump->um_fs->fs_frag, 0, 0);
7867				ACQUIRE_LOCK(ump);
7868			}
7869			newblk = WK_NEWBLK(wk);
7870			if (newblk->nb_jnewblk) {
7871				freework->fw_jnewblk = newblk->nb_jnewblk;
7872				newblk->nb_jnewblk->jn_dep = &freework->fw_list;
7873				newblk->nb_jnewblk = NULL;
7874			}
7875			free_newblk(newblk);
7876			continue;
7877
7878		case D_FREEWORK:
7879			freework = WK_FREEWORK(wk);
7880			if (freework->fw_lbn <= -NDADDR)
7881				handle_workitem_indirblk(freework);
7882			else
7883				freework_freeblock(freework);
7884			continue;
7885		default:
7886			panic("handle_workitem_freeblocks: Unknown type %s",
7887			    TYPENAME(wk->wk_type));
7888		}
7889	}
7890	if (freeblks->fb_ref != 0) {
7891		freeblks->fb_state &= ~INPROGRESS;
7892		wake_worklist(&freeblks->fb_list);
7893		freeblks = NULL;
7894	}
7895	FREE_LOCK(ump);
7896	if (freeblks)
7897		return handle_complete_freeblocks(freeblks, flags);
7898	return (0);
7899}
7900
7901/*
7902 * Handle completion of block free via truncate.  This allows fs_pending
7903 * to track the actual free block count more closely than if we only updated
7904 * it at the end.  We must be careful to handle cases where the block count
7905 * on free was incorrect.
7906 */
7907static void
7908freeblks_free(ump, freeblks, blocks)
7909	struct ufsmount *ump;
7910	struct freeblks *freeblks;
7911	int blocks;
7912{
7913	struct fs *fs;
7914	ufs2_daddr_t remain;
7915
7916	UFS_LOCK(ump);
7917	remain = -freeblks->fb_chkcnt;
7918	freeblks->fb_chkcnt += blocks;
7919	if (remain > 0) {
7920		if (remain < blocks)
7921			blocks = remain;
7922		fs = ump->um_fs;
7923		fs->fs_pendingblocks -= blocks;
7924	}
7925	UFS_UNLOCK(ump);
7926}
7927
7928/*
7929 * Once all of the freework workitems are complete we can retire the
7930 * freeblocks dependency and any journal work awaiting completion.  This
7931 * can not be called until all other dependencies are stable on disk.
7932 */
7933static int
7934handle_complete_freeblocks(freeblks, flags)
7935	struct freeblks *freeblks;
7936	int flags;
7937{
7938	struct inodedep *inodedep;
7939	struct inode *ip;
7940	struct vnode *vp;
7941	struct fs *fs;
7942	struct ufsmount *ump;
7943	ufs2_daddr_t spare;
7944
7945	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7946	fs = ump->um_fs;
7947	flags = LK_EXCLUSIVE | flags;
7948	spare = freeblks->fb_chkcnt;
7949
7950	/*
7951	 * If we did not release the expected number of blocks we may have
7952	 * to adjust the inode block count here.  Only do so if it wasn't
7953	 * a truncation to zero and the modrev still matches.
7954	 */
7955	if (spare && freeblks->fb_len != 0) {
7956		if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7957		    flags, &vp, FFSV_FORCEINSMQ) != 0)
7958			return (EBUSY);
7959		ip = VTOI(vp);
7960		if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
7961			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
7962			ip->i_flag |= IN_CHANGE;
7963			/*
7964			 * We must wait so this happens before the
7965			 * journal is reclaimed.
7966			 */
7967			ffs_update(vp, 1);
7968		}
7969		vput(vp);
7970	}
7971	if (spare < 0) {
7972		UFS_LOCK(ump);
7973		fs->fs_pendingblocks += spare;
7974		UFS_UNLOCK(ump);
7975	}
7976#ifdef QUOTA
7977	/* Handle spare. */
7978	if (spare)
7979		quotaadj(freeblks->fb_quota, ump, -spare);
7980	quotarele(freeblks->fb_quota);
7981#endif
7982	ACQUIRE_LOCK(ump);
7983	if (freeblks->fb_state & ONDEPLIST) {
7984		inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7985		    0, &inodedep);
7986		TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
7987		freeblks->fb_state &= ~ONDEPLIST;
7988		if (TAILQ_EMPTY(&inodedep->id_freeblklst))
7989			free_inodedep(inodedep);
7990	}
7991	/*
7992	 * All of the freeblock deps must be complete prior to this call
7993	 * so it's now safe to complete earlier outstanding journal entries.
7994	 */
7995	handle_jwork(&freeblks->fb_jwork);
7996	WORKITEM_FREE(freeblks, D_FREEBLKS);
7997	FREE_LOCK(ump);
7998	return (0);
7999}
8000
8001/*
8002 * Release blocks associated with the freeblks and stored in the indirect
8003 * block dbn. If level is greater than SINGLE, the block is an indirect block
8004 * and recursive calls to indirtrunc must be used to cleanse other indirect
8005 * blocks.
8006 *
8007 * This handles partial and complete truncation of blocks.  Partial is noted
8008 * with goingaway == 0.  In this case the freework is completed after the
8009 * zero'd indirects are written to disk.  For full truncation the freework
8010 * is completed after the block is freed.
8011 */
8012static void
8013indir_trunc(freework, dbn, lbn)
8014	struct freework *freework;
8015	ufs2_daddr_t dbn;
8016	ufs_lbn_t lbn;
8017{
8018	struct freework *nfreework;
8019	struct workhead wkhd;
8020	struct freeblks *freeblks;
8021	struct buf *bp;
8022	struct fs *fs;
8023	struct indirdep *indirdep;
8024	struct ufsmount *ump;
8025	ufs1_daddr_t *bap1 = 0;
8026	ufs2_daddr_t nb, nnb, *bap2 = 0;
8027	ufs_lbn_t lbnadd, nlbn;
8028	int i, nblocks, ufs1fmt;
8029	int freedblocks;
8030	int goingaway;
8031	int freedeps;
8032	int needj;
8033	int level;
8034	int cnt;
8035
8036	freeblks = freework->fw_freeblks;
8037	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8038	fs = ump->um_fs;
8039	/*
8040	 * Get buffer of block pointers to be freed.  There are three cases:
8041	 *
8042	 * 1) Partial truncate caches the indirdep pointer in the freework
8043	 *    which provides us a back copy to the save bp which holds the
8044	 *    pointers we want to clear.  When this completes the zero
8045	 *    pointers are written to the real copy.
8046	 * 2) The indirect is being completely truncated, cancel_indirdep()
8047	 *    eliminated the real copy and placed the indirdep on the saved
8048	 *    copy.  The indirdep and buf are discarded when this completes.
8049	 * 3) The indirect was not in memory, we read a copy off of the disk
8050	 *    using the devvp and drop and invalidate the buffer when we're
8051	 *    done.
8052	 */
8053	goingaway = 1;
8054	indirdep = NULL;
8055	if (freework->fw_indir != NULL) {
8056		goingaway = 0;
8057		indirdep = freework->fw_indir;
8058		bp = indirdep->ir_savebp;
8059		if (bp == NULL || bp->b_blkno != dbn)
8060			panic("indir_trunc: Bad saved buf %p blkno %jd",
8061			    bp, (intmax_t)dbn);
8062	} else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
8063		/*
8064		 * The lock prevents the buf dep list from changing and
8065	 	 * indirects on devvp should only ever have one dependency.
8066		 */
8067		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
8068		if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
8069			panic("indir_trunc: Bad indirdep %p from buf %p",
8070			    indirdep, bp);
8071	} else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
8072	    NOCRED, &bp) != 0) {
8073		brelse(bp);
8074		return;
8075	}
8076	ACQUIRE_LOCK(ump);
8077	/* Protects against a race with complete_trunc_indir(). */
8078	freework->fw_state &= ~INPROGRESS;
8079	/*
8080	 * If we have an indirdep we need to enforce the truncation order
8081	 * and discard it when it is complete.
8082	 */
8083	if (indirdep) {
8084		if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
8085		    !TAILQ_EMPTY(&indirdep->ir_trunc)) {
8086			/*
8087			 * Add the complete truncate to the list on the
8088			 * indirdep to enforce in-order processing.
8089			 */
8090			if (freework->fw_indir == NULL)
8091				TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
8092				    freework, fw_next);
8093			FREE_LOCK(ump);
8094			return;
8095		}
8096		/*
8097		 * If we're goingaway, free the indirdep.  Otherwise it will
8098		 * linger until the write completes.
8099		 */
8100		if (goingaway)
8101			free_indirdep(indirdep);
8102	}
8103	FREE_LOCK(ump);
8104	/* Initialize pointers depending on block size. */
8105	if (ump->um_fstype == UFS1) {
8106		bap1 = (ufs1_daddr_t *)bp->b_data;
8107		nb = bap1[freework->fw_off];
8108		ufs1fmt = 1;
8109	} else {
8110		bap2 = (ufs2_daddr_t *)bp->b_data;
8111		nb = bap2[freework->fw_off];
8112		ufs1fmt = 0;
8113	}
8114	level = lbn_level(lbn);
8115	needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
8116	lbnadd = lbn_offset(fs, level);
8117	nblocks = btodb(fs->fs_bsize);
8118	nfreework = freework;
8119	freedeps = 0;
8120	cnt = 0;
8121	/*
8122	 * Reclaim blocks.  Traverses into nested indirect levels and
8123	 * arranges for the current level to be freed when subordinates
8124	 * are free when journaling.
8125	 */
8126	for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
8127		if (i != NINDIR(fs) - 1) {
8128			if (ufs1fmt)
8129				nnb = bap1[i+1];
8130			else
8131				nnb = bap2[i+1];
8132		} else
8133			nnb = 0;
8134		if (nb == 0)
8135			continue;
8136		cnt++;
8137		if (level != 0) {
8138			nlbn = (lbn + 1) - (i * lbnadd);
8139			if (needj != 0) {
8140				nfreework = newfreework(ump, freeblks, freework,
8141				    nlbn, nb, fs->fs_frag, 0, 0);
8142				freedeps++;
8143			}
8144			indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
8145		} else {
8146			struct freedep *freedep;
8147
8148			/*
8149			 * Attempt to aggregate freedep dependencies for
8150			 * all blocks being released to the same CG.
8151			 */
8152			LIST_INIT(&wkhd);
8153			if (needj != 0 &&
8154			    (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
8155				freedep = newfreedep(freework);
8156				WORKLIST_INSERT_UNLOCKED(&wkhd,
8157				    &freedep->fd_list);
8158				freedeps++;
8159			}
8160			CTR3(KTR_SUJ,
8161			    "indir_trunc: ino %d blkno %jd size %ld",
8162			    freeblks->fb_inum, nb, fs->fs_bsize);
8163			ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
8164			    fs->fs_bsize, freeblks->fb_inum,
8165			    freeblks->fb_vtype, &wkhd);
8166		}
8167	}
8168	if (goingaway) {
8169		bp->b_flags |= B_INVAL | B_NOCACHE;
8170		brelse(bp);
8171	}
8172	freedblocks = 0;
8173	if (level == 0)
8174		freedblocks = (nblocks * cnt);
8175	if (needj == 0)
8176		freedblocks += nblocks;
8177	freeblks_free(ump, freeblks, freedblocks);
8178	/*
8179	 * If we are journaling set up the ref counts and offset so this
8180	 * indirect can be completed when its children are free.
8181	 */
8182	if (needj) {
8183		ACQUIRE_LOCK(ump);
8184		freework->fw_off = i;
8185		freework->fw_ref += freedeps;
8186		freework->fw_ref -= NINDIR(fs) + 1;
8187		if (level == 0)
8188			freeblks->fb_cgwait += freedeps;
8189		if (freework->fw_ref == 0)
8190			freework_freeblock(freework);
8191		FREE_LOCK(ump);
8192		return;
8193	}
8194	/*
8195	 * If we're not journaling we can free the indirect now.
8196	 */
8197	dbn = dbtofsb(fs, dbn);
8198	CTR3(KTR_SUJ,
8199	    "indir_trunc 2: ino %d blkno %jd size %ld",
8200	    freeblks->fb_inum, dbn, fs->fs_bsize);
8201	ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8202	    freeblks->fb_inum, freeblks->fb_vtype, NULL);
8203	/* Non SUJ softdep does single-threaded truncations. */
8204	if (freework->fw_blkno == dbn) {
8205		freework->fw_state |= ALLCOMPLETE;
8206		ACQUIRE_LOCK(ump);
8207		handle_written_freework(freework);
8208		FREE_LOCK(ump);
8209	}
8210	return;
8211}
8212
8213/*
8214 * Cancel an allocindir when it is removed via truncation.  When bp is not
8215 * NULL the indirect never appeared on disk and is scheduled to be freed
8216 * independently of the indir so we can more easily track journal work.
8217 */
8218static void
8219cancel_allocindir(aip, bp, freeblks, trunc)
8220	struct allocindir *aip;
8221	struct buf *bp;
8222	struct freeblks *freeblks;
8223	int trunc;
8224{
8225	struct indirdep *indirdep;
8226	struct freefrag *freefrag;
8227	struct newblk *newblk;
8228
8229	newblk = (struct newblk *)aip;
8230	LIST_REMOVE(aip, ai_next);
8231	/*
8232	 * We must eliminate the pointer in bp if it must be freed on its
8233	 * own due to partial truncate or pending journal work.
8234	 */
8235	if (bp && (trunc || newblk->nb_jnewblk)) {
8236		/*
8237		 * Clear the pointer and mark the aip to be freed
8238		 * directly if it never existed on disk.
8239		 */
8240		aip->ai_state |= DELAYEDFREE;
8241		indirdep = aip->ai_indirdep;
8242		if (indirdep->ir_state & UFS1FMT)
8243			((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8244		else
8245			((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8246	}
8247	/*
8248	 * When truncating the previous pointer will be freed via
8249	 * savedbp.  Eliminate the freefrag which would dup free.
8250	 */
8251	if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8252		newblk->nb_freefrag = NULL;
8253		if (freefrag->ff_jdep)
8254			cancel_jfreefrag(
8255			    WK_JFREEFRAG(freefrag->ff_jdep));
8256		jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8257		WORKITEM_FREE(freefrag, D_FREEFRAG);
8258	}
8259	/*
8260	 * If the journal hasn't been written the jnewblk must be passed
8261	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
8262	 * this by leaving the journal dependency on the newblk to be freed
8263	 * when a freework is created in handle_workitem_freeblocks().
8264	 */
8265	cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8266	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8267}
8268
8269/*
8270 * Create the mkdir dependencies for . and .. in a new directory.  Link them
8271 * in to a newdirblk so any subsequent additions are tracked properly.  The
8272 * caller is responsible for adding the mkdir1 dependency to the journal
8273 * and updating id_mkdiradd.  This function returns with the per-filesystem
8274 * lock held.
8275 */
8276static struct mkdir *
8277setup_newdir(dap, newinum, dinum, newdirbp, mkdirp)
8278	struct diradd *dap;
8279	ino_t newinum;
8280	ino_t dinum;
8281	struct buf *newdirbp;
8282	struct mkdir **mkdirp;
8283{
8284	struct newblk *newblk;
8285	struct pagedep *pagedep;
8286	struct inodedep *inodedep;
8287	struct newdirblk *newdirblk = 0;
8288	struct mkdir *mkdir1, *mkdir2;
8289	struct worklist *wk;
8290	struct jaddref *jaddref;
8291	struct ufsmount *ump;
8292	struct mount *mp;
8293
8294	mp = dap->da_list.wk_mp;
8295	ump = VFSTOUFS(mp);
8296	newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8297	    M_SOFTDEP_FLAGS);
8298	workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8299	LIST_INIT(&newdirblk->db_mkdir);
8300	mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8301	workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8302	mkdir1->md_state = ATTACHED | MKDIR_BODY;
8303	mkdir1->md_diradd = dap;
8304	mkdir1->md_jaddref = NULL;
8305	mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8306	workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8307	mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8308	mkdir2->md_diradd = dap;
8309	mkdir2->md_jaddref = NULL;
8310	if (MOUNTEDSUJ(mp) == 0) {
8311		mkdir1->md_state |= DEPCOMPLETE;
8312		mkdir2->md_state |= DEPCOMPLETE;
8313	}
8314	/*
8315	 * Dependency on "." and ".." being written to disk.
8316	 */
8317	mkdir1->md_buf = newdirbp;
8318	ACQUIRE_LOCK(VFSTOUFS(mp));
8319	LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs);
8320	/*
8321	 * We must link the pagedep, allocdirect, and newdirblk for
8322	 * the initial file page so the pointer to the new directory
8323	 * is not written until the directory contents are live and
8324	 * any subsequent additions are not marked live until the
8325	 * block is reachable via the inode.
8326	 */
8327	if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8328		panic("setup_newdir: lost pagedep");
8329	LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8330		if (wk->wk_type == D_ALLOCDIRECT)
8331			break;
8332	if (wk == NULL)
8333		panic("setup_newdir: lost allocdirect");
8334	if (pagedep->pd_state & NEWBLOCK)
8335		panic("setup_newdir: NEWBLOCK already set");
8336	newblk = WK_NEWBLK(wk);
8337	pagedep->pd_state |= NEWBLOCK;
8338	pagedep->pd_newdirblk = newdirblk;
8339	newdirblk->db_pagedep = pagedep;
8340	WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8341	WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8342	/*
8343	 * Look up the inodedep for the parent directory so that we
8344	 * can link mkdir2 into the pending dotdot jaddref or
8345	 * the inode write if there is none.  If the inode is
8346	 * ALLCOMPLETE and no jaddref is present all dependencies have
8347	 * been satisfied and mkdir2 can be freed.
8348	 */
8349	inodedep_lookup(mp, dinum, 0, &inodedep);
8350	if (MOUNTEDSUJ(mp)) {
8351		if (inodedep == NULL)
8352			panic("setup_newdir: Lost parent.");
8353		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8354		    inoreflst);
8355		KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8356		    (jaddref->ja_state & MKDIR_PARENT),
8357		    ("setup_newdir: bad dotdot jaddref %p", jaddref));
8358		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8359		mkdir2->md_jaddref = jaddref;
8360		jaddref->ja_mkdir = mkdir2;
8361	} else if (inodedep == NULL ||
8362	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8363		dap->da_state &= ~MKDIR_PARENT;
8364		WORKITEM_FREE(mkdir2, D_MKDIR);
8365		mkdir2 = NULL;
8366	} else {
8367		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8368		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8369	}
8370	*mkdirp = mkdir2;
8371
8372	return (mkdir1);
8373}
8374
8375/*
8376 * Directory entry addition dependencies.
8377 *
8378 * When adding a new directory entry, the inode (with its incremented link
8379 * count) must be written to disk before the directory entry's pointer to it.
8380 * Also, if the inode is newly allocated, the corresponding freemap must be
8381 * updated (on disk) before the directory entry's pointer. These requirements
8382 * are met via undo/redo on the directory entry's pointer, which consists
8383 * simply of the inode number.
8384 *
8385 * As directory entries are added and deleted, the free space within a
8386 * directory block can become fragmented.  The ufs filesystem will compact
8387 * a fragmented directory block to make space for a new entry. When this
8388 * occurs, the offsets of previously added entries change. Any "diradd"
8389 * dependency structures corresponding to these entries must be updated with
8390 * the new offsets.
8391 */
8392
8393/*
8394 * This routine is called after the in-memory inode's link
8395 * count has been incremented, but before the directory entry's
8396 * pointer to the inode has been set.
8397 */
8398int
8399softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
8400	struct buf *bp;		/* buffer containing directory block */
8401	struct inode *dp;	/* inode for directory */
8402	off_t diroffset;	/* offset of new entry in directory */
8403	ino_t newinum;		/* inode referenced by new directory entry */
8404	struct buf *newdirbp;	/* non-NULL => contents of new mkdir */
8405	int isnewblk;		/* entry is in a newly allocated block */
8406{
8407	int offset;		/* offset of new entry within directory block */
8408	ufs_lbn_t lbn;		/* block in directory containing new entry */
8409	struct fs *fs;
8410	struct diradd *dap;
8411	struct newblk *newblk;
8412	struct pagedep *pagedep;
8413	struct inodedep *inodedep;
8414	struct newdirblk *newdirblk = 0;
8415	struct mkdir *mkdir1, *mkdir2;
8416	struct jaddref *jaddref;
8417	struct ufsmount *ump;
8418	struct mount *mp;
8419	int isindir;
8420
8421	ump = dp->i_ump;
8422	mp = UFSTOVFS(ump);
8423	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8424	    ("softdep_setup_directory_add called on non-softdep filesystem"));
8425	/*
8426	 * Whiteouts have no dependencies.
8427	 */
8428	if (newinum == WINO) {
8429		if (newdirbp != NULL)
8430			bdwrite(newdirbp);
8431		return (0);
8432	}
8433	jaddref = NULL;
8434	mkdir1 = mkdir2 = NULL;
8435	fs = dp->i_fs;
8436	lbn = lblkno(fs, diroffset);
8437	offset = blkoff(fs, diroffset);
8438	dap = malloc(sizeof(struct diradd), M_DIRADD,
8439		M_SOFTDEP_FLAGS|M_ZERO);
8440	workitem_alloc(&dap->da_list, D_DIRADD, mp);
8441	dap->da_offset = offset;
8442	dap->da_newinum = newinum;
8443	dap->da_state = ATTACHED;
8444	LIST_INIT(&dap->da_jwork);
8445	isindir = bp->b_lblkno >= NDADDR;
8446	if (isnewblk &&
8447	    (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8448		newdirblk = malloc(sizeof(struct newdirblk),
8449		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8450		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8451		LIST_INIT(&newdirblk->db_mkdir);
8452	}
8453	/*
8454	 * If we're creating a new directory setup the dependencies and set
8455	 * the dap state to wait for them.  Otherwise it's COMPLETE and
8456	 * we can move on.
8457	 */
8458	if (newdirbp == NULL) {
8459		dap->da_state |= DEPCOMPLETE;
8460		ACQUIRE_LOCK(ump);
8461	} else {
8462		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8463		mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8464		    &mkdir2);
8465	}
8466	/*
8467	 * Link into parent directory pagedep to await its being written.
8468	 */
8469	pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8470#ifdef DEBUG
8471	if (diradd_lookup(pagedep, offset) != NULL)
8472		panic("softdep_setup_directory_add: %p already at off %d\n",
8473		    diradd_lookup(pagedep, offset), offset);
8474#endif
8475	dap->da_pagedep = pagedep;
8476	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8477	    da_pdlist);
8478	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
8479	/*
8480	 * If we're journaling, link the diradd into the jaddref so it
8481	 * may be completed after the journal entry is written.  Otherwise,
8482	 * link the diradd into its inodedep.  If the inode is not yet
8483	 * written place it on the bufwait list, otherwise do the post-inode
8484	 * write processing to put it on the id_pendinghd list.
8485	 */
8486	if (MOUNTEDSUJ(mp)) {
8487		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8488		    inoreflst);
8489		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8490		    ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8491		jaddref->ja_diroff = diroffset;
8492		jaddref->ja_diradd = dap;
8493		add_to_journal(&jaddref->ja_list);
8494	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8495		diradd_inode_written(dap, inodedep);
8496	else
8497		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8498	/*
8499	 * Add the journal entries for . and .. links now that the primary
8500	 * link is written.
8501	 */
8502	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8503		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8504		    inoreflst, if_deps);
8505		KASSERT(jaddref != NULL &&
8506		    jaddref->ja_ino == jaddref->ja_parent &&
8507		    (jaddref->ja_state & MKDIR_BODY),
8508		    ("softdep_setup_directory_add: bad dot jaddref %p",
8509		    jaddref));
8510		mkdir1->md_jaddref = jaddref;
8511		jaddref->ja_mkdir = mkdir1;
8512		/*
8513		 * It is important that the dotdot journal entry
8514		 * is added prior to the dot entry since dot writes
8515		 * both the dot and dotdot links.  These both must
8516		 * be added after the primary link for the journal
8517		 * to remain consistent.
8518		 */
8519		add_to_journal(&mkdir2->md_jaddref->ja_list);
8520		add_to_journal(&jaddref->ja_list);
8521	}
8522	/*
8523	 * If we are adding a new directory remember this diradd so that if
8524	 * we rename it we can keep the dot and dotdot dependencies.  If
8525	 * we are adding a new name for an inode that has a mkdiradd we
8526	 * must be in rename and we have to move the dot and dotdot
8527	 * dependencies to this new name.  The old name is being orphaned
8528	 * soon.
8529	 */
8530	if (mkdir1 != NULL) {
8531		if (inodedep->id_mkdiradd != NULL)
8532			panic("softdep_setup_directory_add: Existing mkdir");
8533		inodedep->id_mkdiradd = dap;
8534	} else if (inodedep->id_mkdiradd)
8535		merge_diradd(inodedep, dap);
8536	if (newdirblk) {
8537		/*
8538		 * There is nothing to do if we are already tracking
8539		 * this block.
8540		 */
8541		if ((pagedep->pd_state & NEWBLOCK) != 0) {
8542			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8543			FREE_LOCK(ump);
8544			return (0);
8545		}
8546		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8547		    == 0)
8548			panic("softdep_setup_directory_add: lost entry");
8549		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8550		pagedep->pd_state |= NEWBLOCK;
8551		pagedep->pd_newdirblk = newdirblk;
8552		newdirblk->db_pagedep = pagedep;
8553		FREE_LOCK(ump);
8554		/*
8555		 * If we extended into an indirect signal direnter to sync.
8556		 */
8557		if (isindir)
8558			return (1);
8559		return (0);
8560	}
8561	FREE_LOCK(ump);
8562	return (0);
8563}
8564
8565/*
8566 * This procedure is called to change the offset of a directory
8567 * entry when compacting a directory block which must be owned
8568 * exclusively by the caller. Note that the actual entry movement
8569 * must be done in this procedure to ensure that no I/O completions
8570 * occur while the move is in progress.
8571 */
8572void
8573softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
8574	struct buf *bp;		/* Buffer holding directory block. */
8575	struct inode *dp;	/* inode for directory */
8576	caddr_t base;		/* address of dp->i_offset */
8577	caddr_t oldloc;		/* address of old directory location */
8578	caddr_t newloc;		/* address of new directory location */
8579	int entrysize;		/* size of directory entry */
8580{
8581	int offset, oldoffset, newoffset;
8582	struct pagedep *pagedep;
8583	struct jmvref *jmvref;
8584	struct diradd *dap;
8585	struct direct *de;
8586	struct mount *mp;
8587	ufs_lbn_t lbn;
8588	int flags;
8589
8590	mp = UFSTOVFS(dp->i_ump);
8591	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8592	    ("softdep_change_directoryentry_offset called on "
8593	     "non-softdep filesystem"));
8594	de = (struct direct *)oldloc;
8595	jmvref = NULL;
8596	flags = 0;
8597	/*
8598	 * Moves are always journaled as it would be too complex to
8599	 * determine if any affected adds or removes are present in the
8600	 * journal.
8601	 */
8602	if (MOUNTEDSUJ(mp)) {
8603		flags = DEPALLOC;
8604		jmvref = newjmvref(dp, de->d_ino,
8605		    dp->i_offset + (oldloc - base),
8606		    dp->i_offset + (newloc - base));
8607	}
8608	lbn = lblkno(dp->i_fs, dp->i_offset);
8609	offset = blkoff(dp->i_fs, dp->i_offset);
8610	oldoffset = offset + (oldloc - base);
8611	newoffset = offset + (newloc - base);
8612	ACQUIRE_LOCK(dp->i_ump);
8613	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8614		goto done;
8615	dap = diradd_lookup(pagedep, oldoffset);
8616	if (dap) {
8617		dap->da_offset = newoffset;
8618		newoffset = DIRADDHASH(newoffset);
8619		oldoffset = DIRADDHASH(oldoffset);
8620		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8621		    newoffset != oldoffset) {
8622			LIST_REMOVE(dap, da_pdlist);
8623			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8624			    dap, da_pdlist);
8625		}
8626	}
8627done:
8628	if (jmvref) {
8629		jmvref->jm_pagedep = pagedep;
8630		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8631		add_to_journal(&jmvref->jm_list);
8632	}
8633	bcopy(oldloc, newloc, entrysize);
8634	FREE_LOCK(dp->i_ump);
8635}
8636
8637/*
8638 * Move the mkdir dependencies and journal work from one diradd to another
8639 * when renaming a directory.  The new name must depend on the mkdir deps
8640 * completing as the old name did.  Directories can only have one valid link
8641 * at a time so one must be canonical.
8642 */
8643static void
8644merge_diradd(inodedep, newdap)
8645	struct inodedep *inodedep;
8646	struct diradd *newdap;
8647{
8648	struct diradd *olddap;
8649	struct mkdir *mkdir, *nextmd;
8650	struct ufsmount *ump;
8651	short state;
8652
8653	olddap = inodedep->id_mkdiradd;
8654	inodedep->id_mkdiradd = newdap;
8655	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8656		newdap->da_state &= ~DEPCOMPLETE;
8657		ump = VFSTOUFS(inodedep->id_list.wk_mp);
8658		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8659		     mkdir = nextmd) {
8660			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8661			if (mkdir->md_diradd != olddap)
8662				continue;
8663			mkdir->md_diradd = newdap;
8664			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8665			newdap->da_state |= state;
8666			olddap->da_state &= ~state;
8667			if ((olddap->da_state &
8668			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8669				break;
8670		}
8671		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8672			panic("merge_diradd: unfound ref");
8673	}
8674	/*
8675	 * Any mkdir related journal items are not safe to be freed until
8676	 * the new name is stable.
8677	 */
8678	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8679	olddap->da_state |= DEPCOMPLETE;
8680	complete_diradd(olddap);
8681}
8682
8683/*
8684 * Move the diradd to the pending list when all diradd dependencies are
8685 * complete.
8686 */
8687static void
8688complete_diradd(dap)
8689	struct diradd *dap;
8690{
8691	struct pagedep *pagedep;
8692
8693	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8694		if (dap->da_state & DIRCHG)
8695			pagedep = dap->da_previous->dm_pagedep;
8696		else
8697			pagedep = dap->da_pagedep;
8698		LIST_REMOVE(dap, da_pdlist);
8699		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8700	}
8701}
8702
8703/*
8704 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
8705 * add entries and conditonally journal the remove.
8706 */
8707static void
8708cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8709	struct diradd *dap;
8710	struct dirrem *dirrem;
8711	struct jremref *jremref;
8712	struct jremref *dotremref;
8713	struct jremref *dotdotremref;
8714{
8715	struct inodedep *inodedep;
8716	struct jaddref *jaddref;
8717	struct inoref *inoref;
8718	struct ufsmount *ump;
8719	struct mkdir *mkdir;
8720
8721	/*
8722	 * If no remove references were allocated we're on a non-journaled
8723	 * filesystem and can skip the cancel step.
8724	 */
8725	if (jremref == NULL) {
8726		free_diradd(dap, NULL);
8727		return;
8728	}
8729	/*
8730	 * Cancel the primary name an free it if it does not require
8731	 * journaling.
8732	 */
8733	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8734	    0, &inodedep) != 0) {
8735		/* Abort the addref that reference this diradd.  */
8736		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8737			if (inoref->if_list.wk_type != D_JADDREF)
8738				continue;
8739			jaddref = (struct jaddref *)inoref;
8740			if (jaddref->ja_diradd != dap)
8741				continue;
8742			if (cancel_jaddref(jaddref, inodedep,
8743			    &dirrem->dm_jwork) == 0) {
8744				free_jremref(jremref);
8745				jremref = NULL;
8746			}
8747			break;
8748		}
8749	}
8750	/*
8751	 * Cancel subordinate names and free them if they do not require
8752	 * journaling.
8753	 */
8754	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8755		ump = VFSTOUFS(dap->da_list.wk_mp);
8756		LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
8757			if (mkdir->md_diradd != dap)
8758				continue;
8759			if ((jaddref = mkdir->md_jaddref) == NULL)
8760				continue;
8761			mkdir->md_jaddref = NULL;
8762			if (mkdir->md_state & MKDIR_PARENT) {
8763				if (cancel_jaddref(jaddref, NULL,
8764				    &dirrem->dm_jwork) == 0) {
8765					free_jremref(dotdotremref);
8766					dotdotremref = NULL;
8767				}
8768			} else {
8769				if (cancel_jaddref(jaddref, inodedep,
8770				    &dirrem->dm_jwork) == 0) {
8771					free_jremref(dotremref);
8772					dotremref = NULL;
8773				}
8774			}
8775		}
8776	}
8777
8778	if (jremref)
8779		journal_jremref(dirrem, jremref, inodedep);
8780	if (dotremref)
8781		journal_jremref(dirrem, dotremref, inodedep);
8782	if (dotdotremref)
8783		journal_jremref(dirrem, dotdotremref, NULL);
8784	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8785	free_diradd(dap, &dirrem->dm_jwork);
8786}
8787
8788/*
8789 * Free a diradd dependency structure. This routine must be called
8790 * with splbio interrupts blocked.
8791 */
8792static void
8793free_diradd(dap, wkhd)
8794	struct diradd *dap;
8795	struct workhead *wkhd;
8796{
8797	struct dirrem *dirrem;
8798	struct pagedep *pagedep;
8799	struct inodedep *inodedep;
8800	struct mkdir *mkdir, *nextmd;
8801	struct ufsmount *ump;
8802
8803	ump = VFSTOUFS(dap->da_list.wk_mp);
8804	LOCK_OWNED(ump);
8805	LIST_REMOVE(dap, da_pdlist);
8806	if (dap->da_state & ONWORKLIST)
8807		WORKLIST_REMOVE(&dap->da_list);
8808	if ((dap->da_state & DIRCHG) == 0) {
8809		pagedep = dap->da_pagedep;
8810	} else {
8811		dirrem = dap->da_previous;
8812		pagedep = dirrem->dm_pagedep;
8813		dirrem->dm_dirinum = pagedep->pd_ino;
8814		dirrem->dm_state |= COMPLETE;
8815		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
8816			add_to_worklist(&dirrem->dm_list, 0);
8817	}
8818	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
8819	    0, &inodedep) != 0)
8820		if (inodedep->id_mkdiradd == dap)
8821			inodedep->id_mkdiradd = NULL;
8822	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8823		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8824		     mkdir = nextmd) {
8825			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8826			if (mkdir->md_diradd != dap)
8827				continue;
8828			dap->da_state &=
8829			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
8830			LIST_REMOVE(mkdir, md_mkdirs);
8831			if (mkdir->md_state & ONWORKLIST)
8832				WORKLIST_REMOVE(&mkdir->md_list);
8833			if (mkdir->md_jaddref != NULL)
8834				panic("free_diradd: Unexpected jaddref");
8835			WORKITEM_FREE(mkdir, D_MKDIR);
8836			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
8837				break;
8838		}
8839		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8840			panic("free_diradd: unfound ref");
8841	}
8842	if (inodedep)
8843		free_inodedep(inodedep);
8844	/*
8845	 * Free any journal segments waiting for the directory write.
8846	 */
8847	handle_jwork(&dap->da_jwork);
8848	WORKITEM_FREE(dap, D_DIRADD);
8849}
8850
8851/*
8852 * Directory entry removal dependencies.
8853 *
8854 * When removing a directory entry, the entry's inode pointer must be
8855 * zero'ed on disk before the corresponding inode's link count is decremented
8856 * (possibly freeing the inode for re-use). This dependency is handled by
8857 * updating the directory entry but delaying the inode count reduction until
8858 * after the directory block has been written to disk. After this point, the
8859 * inode count can be decremented whenever it is convenient.
8860 */
8861
8862/*
8863 * This routine should be called immediately after removing
8864 * a directory entry.  The inode's link count should not be
8865 * decremented by the calling procedure -- the soft updates
8866 * code will do this task when it is safe.
8867 */
8868void
8869softdep_setup_remove(bp, dp, ip, isrmdir)
8870	struct buf *bp;		/* buffer containing directory block */
8871	struct inode *dp;	/* inode for the directory being modified */
8872	struct inode *ip;	/* inode for directory entry being removed */
8873	int isrmdir;		/* indicates if doing RMDIR */
8874{
8875	struct dirrem *dirrem, *prevdirrem;
8876	struct inodedep *inodedep;
8877	int direct;
8878
8879	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
8880	    ("softdep_setup_remove called on non-softdep filesystem"));
8881	/*
8882	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
8883	 * newdirrem() to setup the full directory remove which requires
8884	 * isrmdir > 1.
8885	 */
8886	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
8887	/*
8888	 * Add the dirrem to the inodedep's pending remove list for quick
8889	 * discovery later.
8890	 */
8891	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8892	    &inodedep) == 0)
8893		panic("softdep_setup_remove: Lost inodedep.");
8894	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
8895	dirrem->dm_state |= ONDEPLIST;
8896	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
8897
8898	/*
8899	 * If the COMPLETE flag is clear, then there were no active
8900	 * entries and we want to roll back to a zeroed entry until
8901	 * the new inode is committed to disk. If the COMPLETE flag is
8902	 * set then we have deleted an entry that never made it to
8903	 * disk. If the entry we deleted resulted from a name change,
8904	 * then the old name still resides on disk. We cannot delete
8905	 * its inode (returned to us in prevdirrem) until the zeroed
8906	 * directory entry gets to disk. The new inode has never been
8907	 * referenced on the disk, so can be deleted immediately.
8908	 */
8909	if ((dirrem->dm_state & COMPLETE) == 0) {
8910		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
8911		    dm_next);
8912		FREE_LOCK(ip->i_ump);
8913	} else {
8914		if (prevdirrem != NULL)
8915			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
8916			    prevdirrem, dm_next);
8917		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
8918		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
8919		FREE_LOCK(ip->i_ump);
8920		if (direct)
8921			handle_workitem_remove(dirrem, 0);
8922	}
8923}
8924
8925/*
8926 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the
8927 * pd_pendinghd list of a pagedep.
8928 */
8929static struct diradd *
8930diradd_lookup(pagedep, offset)
8931	struct pagedep *pagedep;
8932	int offset;
8933{
8934	struct diradd *dap;
8935
8936	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
8937		if (dap->da_offset == offset)
8938			return (dap);
8939	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
8940		if (dap->da_offset == offset)
8941			return (dap);
8942	return (NULL);
8943}
8944
8945/*
8946 * Search for a .. diradd dependency in a directory that is being removed.
8947 * If the directory was renamed to a new parent we have a diradd rather
8948 * than a mkdir for the .. entry.  We need to cancel it now before
8949 * it is found in truncate().
8950 */
8951static struct jremref *
8952cancel_diradd_dotdot(ip, dirrem, jremref)
8953	struct inode *ip;
8954	struct dirrem *dirrem;
8955	struct jremref *jremref;
8956{
8957	struct pagedep *pagedep;
8958	struct diradd *dap;
8959	struct worklist *wk;
8960
8961	if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
8962	    &pagedep) == 0)
8963		return (jremref);
8964	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
8965	if (dap == NULL)
8966		return (jremref);
8967	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
8968	/*
8969	 * Mark any journal work as belonging to the parent so it is freed
8970	 * with the .. reference.
8971	 */
8972	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8973		wk->wk_state |= MKDIR_PARENT;
8974	return (NULL);
8975}
8976
8977/*
8978 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
8979 * replace it with a dirrem/diradd pair as a result of re-parenting a
8980 * directory.  This ensures that we don't simultaneously have a mkdir and
8981 * a diradd for the same .. entry.
8982 */
8983static struct jremref *
8984cancel_mkdir_dotdot(ip, dirrem, jremref)
8985	struct inode *ip;
8986	struct dirrem *dirrem;
8987	struct jremref *jremref;
8988{
8989	struct inodedep *inodedep;
8990	struct jaddref *jaddref;
8991	struct ufsmount *ump;
8992	struct mkdir *mkdir;
8993	struct diradd *dap;
8994
8995	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8996	    &inodedep) == 0)
8997		return (jremref);
8998	dap = inodedep->id_mkdiradd;
8999	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
9000		return (jremref);
9001	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9002	for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9003	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
9004		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
9005			break;
9006	if (mkdir == NULL)
9007		panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
9008	if ((jaddref = mkdir->md_jaddref) != NULL) {
9009		mkdir->md_jaddref = NULL;
9010		jaddref->ja_state &= ~MKDIR_PARENT;
9011		if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
9012		    &inodedep) == 0)
9013			panic("cancel_mkdir_dotdot: Lost parent inodedep");
9014		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
9015			journal_jremref(dirrem, jremref, inodedep);
9016			jremref = NULL;
9017		}
9018	}
9019	if (mkdir->md_state & ONWORKLIST)
9020		WORKLIST_REMOVE(&mkdir->md_list);
9021	mkdir->md_state |= ALLCOMPLETE;
9022	complete_mkdir(mkdir);
9023	return (jremref);
9024}
9025
9026static void
9027journal_jremref(dirrem, jremref, inodedep)
9028	struct dirrem *dirrem;
9029	struct jremref *jremref;
9030	struct inodedep *inodedep;
9031{
9032
9033	if (inodedep == NULL)
9034		if (inodedep_lookup(jremref->jr_list.wk_mp,
9035		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
9036			panic("journal_jremref: Lost inodedep");
9037	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
9038	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
9039	add_to_journal(&jremref->jr_list);
9040}
9041
9042static void
9043dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
9044	struct dirrem *dirrem;
9045	struct jremref *jremref;
9046	struct jremref *dotremref;
9047	struct jremref *dotdotremref;
9048{
9049	struct inodedep *inodedep;
9050
9051
9052	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
9053	    &inodedep) == 0)
9054		panic("dirrem_journal: Lost inodedep");
9055	journal_jremref(dirrem, jremref, inodedep);
9056	if (dotremref)
9057		journal_jremref(dirrem, dotremref, inodedep);
9058	if (dotdotremref)
9059		journal_jremref(dirrem, dotdotremref, NULL);
9060}
9061
9062/*
9063 * Allocate a new dirrem if appropriate and return it along with
9064 * its associated pagedep. Called without a lock, returns with lock.
9065 */
9066static struct dirrem *
9067newdirrem(bp, dp, ip, isrmdir, prevdirremp)
9068	struct buf *bp;		/* buffer containing directory block */
9069	struct inode *dp;	/* inode for the directory being modified */
9070	struct inode *ip;	/* inode for directory entry being removed */
9071	int isrmdir;		/* indicates if doing RMDIR */
9072	struct dirrem **prevdirremp; /* previously referenced inode, if any */
9073{
9074	int offset;
9075	ufs_lbn_t lbn;
9076	struct diradd *dap;
9077	struct dirrem *dirrem;
9078	struct pagedep *pagedep;
9079	struct jremref *jremref;
9080	struct jremref *dotremref;
9081	struct jremref *dotdotremref;
9082	struct vnode *dvp;
9083
9084	/*
9085	 * Whiteouts have no deletion dependencies.
9086	 */
9087	if (ip == NULL)
9088		panic("newdirrem: whiteout");
9089	dvp = ITOV(dp);
9090	/*
9091	 * If the system is over its limit and our filesystem is
9092	 * responsible for more than our share of that usage and
9093	 * we are not a snapshot, request some inodedep cleanup.
9094	 * Limiting the number of dirrem structures will also limit
9095	 * the number of freefile and freeblks structures.
9096	 */
9097	ACQUIRE_LOCK(ip->i_ump);
9098	while (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2 &&
9099	    ip->i_ump->softdep_curdeps[D_DIRREM] >
9100	    (max_softdeps / 2) / stat_flush_threads)
9101		(void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS);
9102	FREE_LOCK(ip->i_ump);
9103	dirrem = malloc(sizeof(struct dirrem),
9104		M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
9105	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
9106	LIST_INIT(&dirrem->dm_jremrefhd);
9107	LIST_INIT(&dirrem->dm_jwork);
9108	dirrem->dm_state = isrmdir ? RMDIR : 0;
9109	dirrem->dm_oldinum = ip->i_number;
9110	*prevdirremp = NULL;
9111	/*
9112	 * Allocate remove reference structures to track journal write
9113	 * dependencies.  We will always have one for the link and
9114	 * when doing directories we will always have one more for dot.
9115	 * When renaming a directory we skip the dotdot link change so
9116	 * this is not needed.
9117	 */
9118	jremref = dotremref = dotdotremref = NULL;
9119	if (DOINGSUJ(dvp)) {
9120		if (isrmdir) {
9121			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
9122			    ip->i_effnlink + 2);
9123			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
9124			    ip->i_effnlink + 1);
9125			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
9126			    dp->i_effnlink + 1);
9127			dotdotremref->jr_state |= MKDIR_PARENT;
9128		} else
9129			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
9130			    ip->i_effnlink + 1);
9131	}
9132	ACQUIRE_LOCK(ip->i_ump);
9133	lbn = lblkno(dp->i_fs, dp->i_offset);
9134	offset = blkoff(dp->i_fs, dp->i_offset);
9135	pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
9136	    &pagedep);
9137	dirrem->dm_pagedep = pagedep;
9138	dirrem->dm_offset = offset;
9139	/*
9140	 * If we're renaming a .. link to a new directory, cancel any
9141	 * existing MKDIR_PARENT mkdir.  If it has already been canceled
9142	 * the jremref is preserved for any potential diradd in this
9143	 * location.  This can not coincide with a rmdir.
9144	 */
9145	if (dp->i_offset == DOTDOT_OFFSET) {
9146		if (isrmdir)
9147			panic("newdirrem: .. directory change during remove?");
9148		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
9149	}
9150	/*
9151	 * If we're removing a directory search for the .. dependency now and
9152	 * cancel it.  Any pending journal work will be added to the dirrem
9153	 * to be completed when the workitem remove completes.
9154	 */
9155	if (isrmdir)
9156		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
9157	/*
9158	 * Check for a diradd dependency for the same directory entry.
9159	 * If present, then both dependencies become obsolete and can
9160	 * be de-allocated.
9161	 */
9162	dap = diradd_lookup(pagedep, offset);
9163	if (dap == NULL) {
9164		/*
9165		 * Link the jremref structures into the dirrem so they are
9166		 * written prior to the pagedep.
9167		 */
9168		if (jremref)
9169			dirrem_journal(dirrem, jremref, dotremref,
9170			    dotdotremref);
9171		return (dirrem);
9172	}
9173	/*
9174	 * Must be ATTACHED at this point.
9175	 */
9176	if ((dap->da_state & ATTACHED) == 0)
9177		panic("newdirrem: not ATTACHED");
9178	if (dap->da_newinum != ip->i_number)
9179		panic("newdirrem: inum %ju should be %ju",
9180		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
9181	/*
9182	 * If we are deleting a changed name that never made it to disk,
9183	 * then return the dirrem describing the previous inode (which
9184	 * represents the inode currently referenced from this entry on disk).
9185	 */
9186	if ((dap->da_state & DIRCHG) != 0) {
9187		*prevdirremp = dap->da_previous;
9188		dap->da_state &= ~DIRCHG;
9189		dap->da_pagedep = pagedep;
9190	}
9191	/*
9192	 * We are deleting an entry that never made it to disk.
9193	 * Mark it COMPLETE so we can delete its inode immediately.
9194	 */
9195	dirrem->dm_state |= COMPLETE;
9196	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
9197#ifdef SUJ_DEBUG
9198	if (isrmdir == 0) {
9199		struct worklist *wk;
9200
9201		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9202			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9203				panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
9204	}
9205#endif
9206
9207	return (dirrem);
9208}
9209
9210/*
9211 * Directory entry change dependencies.
9212 *
9213 * Changing an existing directory entry requires that an add operation
9214 * be completed first followed by a deletion. The semantics for the addition
9215 * are identical to the description of adding a new entry above except
9216 * that the rollback is to the old inode number rather than zero. Once
9217 * the addition dependency is completed, the removal is done as described
9218 * in the removal routine above.
9219 */
9220
9221/*
9222 * This routine should be called immediately after changing
9223 * a directory entry.  The inode's link count should not be
9224 * decremented by the calling procedure -- the soft updates
9225 * code will perform this task when it is safe.
9226 */
9227void
9228softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
9229	struct buf *bp;		/* buffer containing directory block */
9230	struct inode *dp;	/* inode for the directory being modified */
9231	struct inode *ip;	/* inode for directory entry being removed */
9232	ino_t newinum;		/* new inode number for changed entry */
9233	int isrmdir;		/* indicates if doing RMDIR */
9234{
9235	int offset;
9236	struct diradd *dap = NULL;
9237	struct dirrem *dirrem, *prevdirrem;
9238	struct pagedep *pagedep;
9239	struct inodedep *inodedep;
9240	struct jaddref *jaddref;
9241	struct mount *mp;
9242
9243	offset = blkoff(dp->i_fs, dp->i_offset);
9244	mp = UFSTOVFS(dp->i_ump);
9245	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
9246	   ("softdep_setup_directory_change called on non-softdep filesystem"));
9247
9248	/*
9249	 * Whiteouts do not need diradd dependencies.
9250	 */
9251	if (newinum != WINO) {
9252		dap = malloc(sizeof(struct diradd),
9253		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9254		workitem_alloc(&dap->da_list, D_DIRADD, mp);
9255		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9256		dap->da_offset = offset;
9257		dap->da_newinum = newinum;
9258		LIST_INIT(&dap->da_jwork);
9259	}
9260
9261	/*
9262	 * Allocate a new dirrem and ACQUIRE_LOCK.
9263	 */
9264	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9265	pagedep = dirrem->dm_pagedep;
9266	/*
9267	 * The possible values for isrmdir:
9268	 *	0 - non-directory file rename
9269	 *	1 - directory rename within same directory
9270	 *   inum - directory rename to new directory of given inode number
9271	 * When renaming to a new directory, we are both deleting and
9272	 * creating a new directory entry, so the link count on the new
9273	 * directory should not change. Thus we do not need the followup
9274	 * dirrem which is usually done in handle_workitem_remove. We set
9275	 * the DIRCHG flag to tell handle_workitem_remove to skip the
9276	 * followup dirrem.
9277	 */
9278	if (isrmdir > 1)
9279		dirrem->dm_state |= DIRCHG;
9280
9281	/*
9282	 * Whiteouts have no additional dependencies,
9283	 * so just put the dirrem on the correct list.
9284	 */
9285	if (newinum == WINO) {
9286		if ((dirrem->dm_state & COMPLETE) == 0) {
9287			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9288			    dm_next);
9289		} else {
9290			dirrem->dm_dirinum = pagedep->pd_ino;
9291			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9292				add_to_worklist(&dirrem->dm_list, 0);
9293		}
9294		FREE_LOCK(dp->i_ump);
9295		return;
9296	}
9297	/*
9298	 * Add the dirrem to the inodedep's pending remove list for quick
9299	 * discovery later.  A valid nlinkdelta ensures that this lookup
9300	 * will not fail.
9301	 */
9302	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9303		panic("softdep_setup_directory_change: Lost inodedep.");
9304	dirrem->dm_state |= ONDEPLIST;
9305	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9306
9307	/*
9308	 * If the COMPLETE flag is clear, then there were no active
9309	 * entries and we want to roll back to the previous inode until
9310	 * the new inode is committed to disk. If the COMPLETE flag is
9311	 * set, then we have deleted an entry that never made it to disk.
9312	 * If the entry we deleted resulted from a name change, then the old
9313	 * inode reference still resides on disk. Any rollback that we do
9314	 * needs to be to that old inode (returned to us in prevdirrem). If
9315	 * the entry we deleted resulted from a create, then there is
9316	 * no entry on the disk, so we want to roll back to zero rather
9317	 * than the uncommitted inode. In either of the COMPLETE cases we
9318	 * want to immediately free the unwritten and unreferenced inode.
9319	 */
9320	if ((dirrem->dm_state & COMPLETE) == 0) {
9321		dap->da_previous = dirrem;
9322	} else {
9323		if (prevdirrem != NULL) {
9324			dap->da_previous = prevdirrem;
9325		} else {
9326			dap->da_state &= ~DIRCHG;
9327			dap->da_pagedep = pagedep;
9328		}
9329		dirrem->dm_dirinum = pagedep->pd_ino;
9330		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9331			add_to_worklist(&dirrem->dm_list, 0);
9332	}
9333	/*
9334	 * Lookup the jaddref for this journal entry.  We must finish
9335	 * initializing it and make the diradd write dependent on it.
9336	 * If we're not journaling, put it on the id_bufwait list if the
9337	 * inode is not yet written. If it is written, do the post-inode
9338	 * write processing to put it on the id_pendinghd list.
9339	 */
9340	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
9341	if (MOUNTEDSUJ(mp)) {
9342		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9343		    inoreflst);
9344		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9345		    ("softdep_setup_directory_change: bad jaddref %p",
9346		    jaddref));
9347		jaddref->ja_diroff = dp->i_offset;
9348		jaddref->ja_diradd = dap;
9349		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9350		    dap, da_pdlist);
9351		add_to_journal(&jaddref->ja_list);
9352	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9353		dap->da_state |= COMPLETE;
9354		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9355		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9356	} else {
9357		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9358		    dap, da_pdlist);
9359		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9360	}
9361	/*
9362	 * If we're making a new name for a directory that has not been
9363	 * committed when need to move the dot and dotdot references to
9364	 * this new name.
9365	 */
9366	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
9367		merge_diradd(inodedep, dap);
9368	FREE_LOCK(dp->i_ump);
9369}
9370
9371/*
9372 * Called whenever the link count on an inode is changed.
9373 * It creates an inode dependency so that the new reference(s)
9374 * to the inode cannot be committed to disk until the updated
9375 * inode has been written.
9376 */
9377void
9378softdep_change_linkcnt(ip)
9379	struct inode *ip;	/* the inode with the increased link count */
9380{
9381	struct inodedep *inodedep;
9382	int dflags;
9383
9384	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
9385	    ("softdep_change_linkcnt called on non-softdep filesystem"));
9386	ACQUIRE_LOCK(ip->i_ump);
9387	dflags = DEPALLOC;
9388	if (IS_SNAPSHOT(ip))
9389		dflags |= NODELAY;
9390	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
9391	if (ip->i_nlink < ip->i_effnlink)
9392		panic("softdep_change_linkcnt: bad delta");
9393	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9394	FREE_LOCK(ip->i_ump);
9395}
9396
9397/*
9398 * Attach a sbdep dependency to the superblock buf so that we can keep
9399 * track of the head of the linked list of referenced but unlinked inodes.
9400 */
9401void
9402softdep_setup_sbupdate(ump, fs, bp)
9403	struct ufsmount *ump;
9404	struct fs *fs;
9405	struct buf *bp;
9406{
9407	struct sbdep *sbdep;
9408	struct worklist *wk;
9409
9410	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9411	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
9412	LIST_FOREACH(wk, &bp->b_dep, wk_list)
9413		if (wk->wk_type == D_SBDEP)
9414			break;
9415	if (wk != NULL)
9416		return;
9417	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9418	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9419	sbdep->sb_fs = fs;
9420	sbdep->sb_ump = ump;
9421	ACQUIRE_LOCK(ump);
9422	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9423	FREE_LOCK(ump);
9424}
9425
9426/*
9427 * Return the first unlinked inodedep which is ready to be the head of the
9428 * list.  The inodedep and all those after it must have valid next pointers.
9429 */
9430static struct inodedep *
9431first_unlinked_inodedep(ump)
9432	struct ufsmount *ump;
9433{
9434	struct inodedep *inodedep;
9435	struct inodedep *idp;
9436
9437	LOCK_OWNED(ump);
9438	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9439	    inodedep; inodedep = idp) {
9440		if ((inodedep->id_state & UNLINKNEXT) == 0)
9441			return (NULL);
9442		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9443		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9444			break;
9445		if ((inodedep->id_state & UNLINKPREV) == 0)
9446			break;
9447	}
9448	return (inodedep);
9449}
9450
9451/*
9452 * Set the sujfree unlinked head pointer prior to writing a superblock.
9453 */
9454static void
9455initiate_write_sbdep(sbdep)
9456	struct sbdep *sbdep;
9457{
9458	struct inodedep *inodedep;
9459	struct fs *bpfs;
9460	struct fs *fs;
9461
9462	bpfs = sbdep->sb_fs;
9463	fs = sbdep->sb_ump->um_fs;
9464	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9465	if (inodedep) {
9466		fs->fs_sujfree = inodedep->id_ino;
9467		inodedep->id_state |= UNLINKPREV;
9468	} else
9469		fs->fs_sujfree = 0;
9470	bpfs->fs_sujfree = fs->fs_sujfree;
9471}
9472
9473/*
9474 * After a superblock is written determine whether it must be written again
9475 * due to a changing unlinked list head.
9476 */
9477static int
9478handle_written_sbdep(sbdep, bp)
9479	struct sbdep *sbdep;
9480	struct buf *bp;
9481{
9482	struct inodedep *inodedep;
9483	struct mount *mp;
9484	struct fs *fs;
9485
9486	LOCK_OWNED(sbdep->sb_ump);
9487	fs = sbdep->sb_fs;
9488	mp = UFSTOVFS(sbdep->sb_ump);
9489	/*
9490	 * If the superblock doesn't match the in-memory list start over.
9491	 */
9492	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9493	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9494	    (inodedep == NULL && fs->fs_sujfree != 0)) {
9495		bdirty(bp);
9496		return (1);
9497	}
9498	WORKITEM_FREE(sbdep, D_SBDEP);
9499	if (fs->fs_sujfree == 0)
9500		return (0);
9501	/*
9502	 * Now that we have a record of this inode in stable store allow it
9503	 * to be written to free up pending work.  Inodes may see a lot of
9504	 * write activity after they are unlinked which we must not hold up.
9505	 */
9506	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9507		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9508			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9509			    inodedep, inodedep->id_state);
9510		if (inodedep->id_state & UNLINKONLIST)
9511			break;
9512		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9513	}
9514
9515	return (0);
9516}
9517
9518/*
9519 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9520 */
9521static void
9522unlinked_inodedep(mp, inodedep)
9523	struct mount *mp;
9524	struct inodedep *inodedep;
9525{
9526	struct ufsmount *ump;
9527
9528	ump = VFSTOUFS(mp);
9529	LOCK_OWNED(ump);
9530	if (MOUNTEDSUJ(mp) == 0)
9531		return;
9532	ump->um_fs->fs_fmod = 1;
9533	if (inodedep->id_state & UNLINKED)
9534		panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9535	inodedep->id_state |= UNLINKED;
9536	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9537}
9538
9539/*
9540 * Remove an inodedep from the unlinked inodedep list.  This may require
9541 * disk writes if the inode has made it that far.
9542 */
9543static void
9544clear_unlinked_inodedep(inodedep)
9545	struct inodedep *inodedep;
9546{
9547	struct ufsmount *ump;
9548	struct inodedep *idp;
9549	struct inodedep *idn;
9550	struct fs *fs;
9551	struct buf *bp;
9552	ino_t ino;
9553	ino_t nino;
9554	ino_t pino;
9555	int error;
9556
9557	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9558	fs = ump->um_fs;
9559	ino = inodedep->id_ino;
9560	error = 0;
9561	for (;;) {
9562		LOCK_OWNED(ump);
9563		KASSERT((inodedep->id_state & UNLINKED) != 0,
9564		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9565		    inodedep));
9566		/*
9567		 * If nothing has yet been written simply remove us from
9568		 * the in memory list and return.  This is the most common
9569		 * case where handle_workitem_remove() loses the final
9570		 * reference.
9571		 */
9572		if ((inodedep->id_state & UNLINKLINKS) == 0)
9573			break;
9574		/*
9575		 * If we have a NEXT pointer and no PREV pointer we can simply
9576		 * clear NEXT's PREV and remove ourselves from the list.  Be
9577		 * careful not to clear PREV if the superblock points at
9578		 * next as well.
9579		 */
9580		idn = TAILQ_NEXT(inodedep, id_unlinked);
9581		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9582			if (idn && fs->fs_sujfree != idn->id_ino)
9583				idn->id_state &= ~UNLINKPREV;
9584			break;
9585		}
9586		/*
9587		 * Here we have an inodedep which is actually linked into
9588		 * the list.  We must remove it by forcing a write to the
9589		 * link before us, whether it be the superblock or an inode.
9590		 * Unfortunately the list may change while we're waiting
9591		 * on the buf lock for either resource so we must loop until
9592		 * we lock the right one.  If both the superblock and an
9593		 * inode point to this inode we must clear the inode first
9594		 * followed by the superblock.
9595		 */
9596		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9597		pino = 0;
9598		if (idp && (idp->id_state & UNLINKNEXT))
9599			pino = idp->id_ino;
9600		FREE_LOCK(ump);
9601		if (pino == 0) {
9602			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9603			    (int)fs->fs_sbsize, 0, 0, 0);
9604		} else {
9605			error = bread(ump->um_devvp,
9606			    fsbtodb(fs, ino_to_fsba(fs, pino)),
9607			    (int)fs->fs_bsize, NOCRED, &bp);
9608			if (error)
9609				brelse(bp);
9610		}
9611		ACQUIRE_LOCK(ump);
9612		if (error)
9613			break;
9614		/* If the list has changed restart the loop. */
9615		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9616		nino = 0;
9617		if (idp && (idp->id_state & UNLINKNEXT))
9618			nino = idp->id_ino;
9619		if (nino != pino ||
9620		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9621			FREE_LOCK(ump);
9622			brelse(bp);
9623			ACQUIRE_LOCK(ump);
9624			continue;
9625		}
9626		nino = 0;
9627		idn = TAILQ_NEXT(inodedep, id_unlinked);
9628		if (idn)
9629			nino = idn->id_ino;
9630		/*
9631		 * Remove us from the in memory list.  After this we cannot
9632		 * access the inodedep.
9633		 */
9634		KASSERT((inodedep->id_state & UNLINKED) != 0,
9635		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9636		    inodedep));
9637		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9638		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9639		FREE_LOCK(ump);
9640		/*
9641		 * The predecessor's next pointer is manually updated here
9642		 * so that the NEXT flag is never cleared for an element
9643		 * that is in the list.
9644		 */
9645		if (pino == 0) {
9646			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9647			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9648			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9649			    bp);
9650		} else if (fs->fs_magic == FS_UFS1_MAGIC)
9651			((struct ufs1_dinode *)bp->b_data +
9652			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9653		else
9654			((struct ufs2_dinode *)bp->b_data +
9655			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9656		/*
9657		 * If the bwrite fails we have no recourse to recover.  The
9658		 * filesystem is corrupted already.
9659		 */
9660		bwrite(bp);
9661		ACQUIRE_LOCK(ump);
9662		/*
9663		 * If the superblock pointer still needs to be cleared force
9664		 * a write here.
9665		 */
9666		if (fs->fs_sujfree == ino) {
9667			FREE_LOCK(ump);
9668			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9669			    (int)fs->fs_sbsize, 0, 0, 0);
9670			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9671			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9672			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9673			    bp);
9674			bwrite(bp);
9675			ACQUIRE_LOCK(ump);
9676		}
9677
9678		if (fs->fs_sujfree != ino)
9679			return;
9680		panic("clear_unlinked_inodedep: Failed to clear free head");
9681	}
9682	if (inodedep->id_ino == fs->fs_sujfree)
9683		panic("clear_unlinked_inodedep: Freeing head of free list");
9684	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9685	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9686	return;
9687}
9688
9689/*
9690 * This workitem decrements the inode's link count.
9691 * If the link count reaches zero, the file is removed.
9692 */
9693static int
9694handle_workitem_remove(dirrem, flags)
9695	struct dirrem *dirrem;
9696	int flags;
9697{
9698	struct inodedep *inodedep;
9699	struct workhead dotdotwk;
9700	struct worklist *wk;
9701	struct ufsmount *ump;
9702	struct mount *mp;
9703	struct vnode *vp;
9704	struct inode *ip;
9705	ino_t oldinum;
9706
9707	if (dirrem->dm_state & ONWORKLIST)
9708		panic("handle_workitem_remove: dirrem %p still on worklist",
9709		    dirrem);
9710	oldinum = dirrem->dm_oldinum;
9711	mp = dirrem->dm_list.wk_mp;
9712	ump = VFSTOUFS(mp);
9713	flags |= LK_EXCLUSIVE;
9714	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
9715		return (EBUSY);
9716	ip = VTOI(vp);
9717	ACQUIRE_LOCK(ump);
9718	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
9719		panic("handle_workitem_remove: lost inodedep");
9720	if (dirrem->dm_state & ONDEPLIST)
9721		LIST_REMOVE(dirrem, dm_inonext);
9722	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
9723	    ("handle_workitem_remove:  Journal entries not written."));
9724
9725	/*
9726	 * Move all dependencies waiting on the remove to complete
9727	 * from the dirrem to the inode inowait list to be completed
9728	 * after the inode has been updated and written to disk.  Any
9729	 * marked MKDIR_PARENT are saved to be completed when the .. ref
9730	 * is removed.
9731	 */
9732	LIST_INIT(&dotdotwk);
9733	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
9734		WORKLIST_REMOVE(wk);
9735		if (wk->wk_state & MKDIR_PARENT) {
9736			wk->wk_state &= ~MKDIR_PARENT;
9737			WORKLIST_INSERT(&dotdotwk, wk);
9738			continue;
9739		}
9740		WORKLIST_INSERT(&inodedep->id_inowait, wk);
9741	}
9742	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
9743	/*
9744	 * Normal file deletion.
9745	 */
9746	if ((dirrem->dm_state & RMDIR) == 0) {
9747		ip->i_nlink--;
9748		DIP_SET(ip, i_nlink, ip->i_nlink);
9749		ip->i_flag |= IN_CHANGE;
9750		if (ip->i_nlink < ip->i_effnlink)
9751			panic("handle_workitem_remove: bad file delta");
9752		if (ip->i_nlink == 0)
9753			unlinked_inodedep(mp, inodedep);
9754		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9755		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9756		    ("handle_workitem_remove: worklist not empty. %s",
9757		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
9758		WORKITEM_FREE(dirrem, D_DIRREM);
9759		FREE_LOCK(ump);
9760		goto out;
9761	}
9762	/*
9763	 * Directory deletion. Decrement reference count for both the
9764	 * just deleted parent directory entry and the reference for ".".
9765	 * Arrange to have the reference count on the parent decremented
9766	 * to account for the loss of "..".
9767	 */
9768	ip->i_nlink -= 2;
9769	DIP_SET(ip, i_nlink, ip->i_nlink);
9770	ip->i_flag |= IN_CHANGE;
9771	if (ip->i_nlink < ip->i_effnlink)
9772		panic("handle_workitem_remove: bad dir delta");
9773	if (ip->i_nlink == 0)
9774		unlinked_inodedep(mp, inodedep);
9775	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9776	/*
9777	 * Rename a directory to a new parent. Since, we are both deleting
9778	 * and creating a new directory entry, the link count on the new
9779	 * directory should not change. Thus we skip the followup dirrem.
9780	 */
9781	if (dirrem->dm_state & DIRCHG) {
9782		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9783		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
9784		WORKITEM_FREE(dirrem, D_DIRREM);
9785		FREE_LOCK(ump);
9786		goto out;
9787	}
9788	dirrem->dm_state = ONDEPLIST;
9789	dirrem->dm_oldinum = dirrem->dm_dirinum;
9790	/*
9791	 * Place the dirrem on the parent's diremhd list.
9792	 */
9793	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
9794		panic("handle_workitem_remove: lost dir inodedep");
9795	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9796	/*
9797	 * If the allocated inode has never been written to disk, then
9798	 * the on-disk inode is zero'ed and we can remove the file
9799	 * immediately.  When journaling if the inode has been marked
9800	 * unlinked and not DEPCOMPLETE we know it can never be written.
9801	 */
9802	inodedep_lookup(mp, oldinum, 0, &inodedep);
9803	if (inodedep == NULL ||
9804	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
9805	    check_inode_unwritten(inodedep)) {
9806		FREE_LOCK(ump);
9807		vput(vp);
9808		return handle_workitem_remove(dirrem, flags);
9809	}
9810	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
9811	FREE_LOCK(ump);
9812	ip->i_flag |= IN_CHANGE;
9813out:
9814	ffs_update(vp, 0);
9815	vput(vp);
9816	return (0);
9817}
9818
9819/*
9820 * Inode de-allocation dependencies.
9821 *
9822 * When an inode's link count is reduced to zero, it can be de-allocated. We
9823 * found it convenient to postpone de-allocation until after the inode is
9824 * written to disk with its new link count (zero).  At this point, all of the
9825 * on-disk inode's block pointers are nullified and, with careful dependency
9826 * list ordering, all dependencies related to the inode will be satisfied and
9827 * the corresponding dependency structures de-allocated.  So, if/when the
9828 * inode is reused, there will be no mixing of old dependencies with new
9829 * ones.  This artificial dependency is set up by the block de-allocation
9830 * procedure above (softdep_setup_freeblocks) and completed by the
9831 * following procedure.
9832 */
9833static void
9834handle_workitem_freefile(freefile)
9835	struct freefile *freefile;
9836{
9837	struct workhead wkhd;
9838	struct fs *fs;
9839	struct inodedep *idp;
9840	struct ufsmount *ump;
9841	int error;
9842
9843	ump = VFSTOUFS(freefile->fx_list.wk_mp);
9844	fs = ump->um_fs;
9845#ifdef DEBUG
9846	ACQUIRE_LOCK(ump);
9847	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
9848	FREE_LOCK(ump);
9849	if (error)
9850		panic("handle_workitem_freefile: inodedep %p survived", idp);
9851#endif
9852	UFS_LOCK(ump);
9853	fs->fs_pendinginodes -= 1;
9854	UFS_UNLOCK(ump);
9855	LIST_INIT(&wkhd);
9856	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
9857	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
9858	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
9859		softdep_error("handle_workitem_freefile", error);
9860	ACQUIRE_LOCK(ump);
9861	WORKITEM_FREE(freefile, D_FREEFILE);
9862	FREE_LOCK(ump);
9863}
9864
9865
9866/*
9867 * Helper function which unlinks marker element from work list and returns
9868 * the next element on the list.
9869 */
9870static __inline struct worklist *
9871markernext(struct worklist *marker)
9872{
9873	struct worklist *next;
9874
9875	next = LIST_NEXT(marker, wk_list);
9876	LIST_REMOVE(marker, wk_list);
9877	return next;
9878}
9879
9880/*
9881 * Disk writes.
9882 *
9883 * The dependency structures constructed above are most actively used when file
9884 * system blocks are written to disk.  No constraints are placed on when a
9885 * block can be written, but unsatisfied update dependencies are made safe by
9886 * modifying (or replacing) the source memory for the duration of the disk
9887 * write.  When the disk write completes, the memory block is again brought
9888 * up-to-date.
9889 *
9890 * In-core inode structure reclamation.
9891 *
9892 * Because there are a finite number of "in-core" inode structures, they are
9893 * reused regularly.  By transferring all inode-related dependencies to the
9894 * in-memory inode block and indexing them separately (via "inodedep"s), we
9895 * can allow "in-core" inode structures to be reused at any time and avoid
9896 * any increase in contention.
9897 *
9898 * Called just before entering the device driver to initiate a new disk I/O.
9899 * The buffer must be locked, thus, no I/O completion operations can occur
9900 * while we are manipulating its associated dependencies.
9901 */
9902static void
9903softdep_disk_io_initiation(bp)
9904	struct buf *bp;		/* structure describing disk write to occur */
9905{
9906	struct worklist *wk;
9907	struct worklist marker;
9908	struct inodedep *inodedep;
9909	struct freeblks *freeblks;
9910	struct jblkdep *jblkdep;
9911	struct newblk *newblk;
9912	struct ufsmount *ump;
9913
9914	/*
9915	 * We only care about write operations. There should never
9916	 * be dependencies for reads.
9917	 */
9918	if (bp->b_iocmd != BIO_WRITE)
9919		panic("softdep_disk_io_initiation: not write");
9920
9921	if (bp->b_vflags & BV_BKGRDINPROG)
9922		panic("softdep_disk_io_initiation: Writing buffer with "
9923		    "background write in progress: %p", bp);
9924
9925	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
9926		return;
9927	ump = VFSTOUFS(wk->wk_mp);
9928
9929	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
9930	PHOLD(curproc);			/* Don't swap out kernel stack */
9931	ACQUIRE_LOCK(ump);
9932	/*
9933	 * Do any necessary pre-I/O processing.
9934	 */
9935	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
9936	     wk = markernext(&marker)) {
9937		LIST_INSERT_AFTER(wk, &marker, wk_list);
9938		switch (wk->wk_type) {
9939
9940		case D_PAGEDEP:
9941			initiate_write_filepage(WK_PAGEDEP(wk), bp);
9942			continue;
9943
9944		case D_INODEDEP:
9945			inodedep = WK_INODEDEP(wk);
9946			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
9947				initiate_write_inodeblock_ufs1(inodedep, bp);
9948			else
9949				initiate_write_inodeblock_ufs2(inodedep, bp);
9950			continue;
9951
9952		case D_INDIRDEP:
9953			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
9954			continue;
9955
9956		case D_BMSAFEMAP:
9957			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
9958			continue;
9959
9960		case D_JSEG:
9961			WK_JSEG(wk)->js_buf = NULL;
9962			continue;
9963
9964		case D_FREEBLKS:
9965			freeblks = WK_FREEBLKS(wk);
9966			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
9967			/*
9968			 * We have to wait for the freeblks to be journaled
9969			 * before we can write an inodeblock with updated
9970			 * pointers.  Be careful to arrange the marker so
9971			 * we revisit the freeblks if it's not removed by
9972			 * the first jwait().
9973			 */
9974			if (jblkdep != NULL) {
9975				LIST_REMOVE(&marker, wk_list);
9976				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9977				jwait(&jblkdep->jb_list, MNT_WAIT);
9978			}
9979			continue;
9980		case D_ALLOCDIRECT:
9981		case D_ALLOCINDIR:
9982			/*
9983			 * We have to wait for the jnewblk to be journaled
9984			 * before we can write to a block if the contents
9985			 * may be confused with an earlier file's indirect
9986			 * at recovery time.  Handle the marker as described
9987			 * above.
9988			 */
9989			newblk = WK_NEWBLK(wk);
9990			if (newblk->nb_jnewblk != NULL &&
9991			    indirblk_lookup(newblk->nb_list.wk_mp,
9992			    newblk->nb_newblkno)) {
9993				LIST_REMOVE(&marker, wk_list);
9994				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9995				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
9996			}
9997			continue;
9998
9999		case D_SBDEP:
10000			initiate_write_sbdep(WK_SBDEP(wk));
10001			continue;
10002
10003		case D_MKDIR:
10004		case D_FREEWORK:
10005		case D_FREEDEP:
10006		case D_JSEGDEP:
10007			continue;
10008
10009		default:
10010			panic("handle_disk_io_initiation: Unexpected type %s",
10011			    TYPENAME(wk->wk_type));
10012			/* NOTREACHED */
10013		}
10014	}
10015	FREE_LOCK(ump);
10016	PRELE(curproc);			/* Allow swapout of kernel stack */
10017}
10018
10019/*
10020 * Called from within the procedure above to deal with unsatisfied
10021 * allocation dependencies in a directory. The buffer must be locked,
10022 * thus, no I/O completion operations can occur while we are
10023 * manipulating its associated dependencies.
10024 */
10025static void
10026initiate_write_filepage(pagedep, bp)
10027	struct pagedep *pagedep;
10028	struct buf *bp;
10029{
10030	struct jremref *jremref;
10031	struct jmvref *jmvref;
10032	struct dirrem *dirrem;
10033	struct diradd *dap;
10034	struct direct *ep;
10035	int i;
10036
10037	if (pagedep->pd_state & IOSTARTED) {
10038		/*
10039		 * This can only happen if there is a driver that does not
10040		 * understand chaining. Here biodone will reissue the call
10041		 * to strategy for the incomplete buffers.
10042		 */
10043		printf("initiate_write_filepage: already started\n");
10044		return;
10045	}
10046	pagedep->pd_state |= IOSTARTED;
10047	/*
10048	 * Wait for all journal remove dependencies to hit the disk.
10049	 * We can not allow any potentially conflicting directory adds
10050	 * to be visible before removes and rollback is too difficult.
10051	 * The per-filesystem lock may be dropped and re-acquired, however
10052	 * we hold the buf locked so the dependency can not go away.
10053	 */
10054	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
10055		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
10056			jwait(&jremref->jr_list, MNT_WAIT);
10057	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
10058		jwait(&jmvref->jm_list, MNT_WAIT);
10059	for (i = 0; i < DAHASHSZ; i++) {
10060		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
10061			ep = (struct direct *)
10062			    ((char *)bp->b_data + dap->da_offset);
10063			if (ep->d_ino != dap->da_newinum)
10064				panic("%s: dir inum %ju != new %ju",
10065				    "initiate_write_filepage",
10066				    (uintmax_t)ep->d_ino,
10067				    (uintmax_t)dap->da_newinum);
10068			if (dap->da_state & DIRCHG)
10069				ep->d_ino = dap->da_previous->dm_oldinum;
10070			else
10071				ep->d_ino = 0;
10072			dap->da_state &= ~ATTACHED;
10073			dap->da_state |= UNDONE;
10074		}
10075	}
10076}
10077
10078/*
10079 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
10080 * Note that any bug fixes made to this routine must be done in the
10081 * version found below.
10082 *
10083 * Called from within the procedure above to deal with unsatisfied
10084 * allocation dependencies in an inodeblock. The buffer must be
10085 * locked, thus, no I/O completion operations can occur while we
10086 * are manipulating its associated dependencies.
10087 */
10088static void
10089initiate_write_inodeblock_ufs1(inodedep, bp)
10090	struct inodedep *inodedep;
10091	struct buf *bp;			/* The inode block */
10092{
10093	struct allocdirect *adp, *lastadp;
10094	struct ufs1_dinode *dp;
10095	struct ufs1_dinode *sip;
10096	struct inoref *inoref;
10097	struct ufsmount *ump;
10098	struct fs *fs;
10099	ufs_lbn_t i;
10100#ifdef INVARIANTS
10101	ufs_lbn_t prevlbn = 0;
10102#endif
10103	int deplist;
10104
10105	if (inodedep->id_state & IOSTARTED)
10106		panic("initiate_write_inodeblock_ufs1: already started");
10107	inodedep->id_state |= IOSTARTED;
10108	fs = inodedep->id_fs;
10109	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10110	LOCK_OWNED(ump);
10111	dp = (struct ufs1_dinode *)bp->b_data +
10112	    ino_to_fsbo(fs, inodedep->id_ino);
10113
10114	/*
10115	 * If we're on the unlinked list but have not yet written our
10116	 * next pointer initialize it here.
10117	 */
10118	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10119		struct inodedep *inon;
10120
10121		inon = TAILQ_NEXT(inodedep, id_unlinked);
10122		dp->di_freelink = inon ? inon->id_ino : 0;
10123	}
10124	/*
10125	 * If the bitmap is not yet written, then the allocated
10126	 * inode cannot be written to disk.
10127	 */
10128	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10129		if (inodedep->id_savedino1 != NULL)
10130			panic("initiate_write_inodeblock_ufs1: I/O underway");
10131		FREE_LOCK(ump);
10132		sip = malloc(sizeof(struct ufs1_dinode),
10133		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10134		ACQUIRE_LOCK(ump);
10135		inodedep->id_savedino1 = sip;
10136		*inodedep->id_savedino1 = *dp;
10137		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
10138		dp->di_gen = inodedep->id_savedino1->di_gen;
10139		dp->di_freelink = inodedep->id_savedino1->di_freelink;
10140		return;
10141	}
10142	/*
10143	 * If no dependencies, then there is nothing to roll back.
10144	 */
10145	inodedep->id_savedsize = dp->di_size;
10146	inodedep->id_savedextsize = 0;
10147	inodedep->id_savednlink = dp->di_nlink;
10148	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10149	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10150		return;
10151	/*
10152	 * Revert the link count to that of the first unwritten journal entry.
10153	 */
10154	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10155	if (inoref)
10156		dp->di_nlink = inoref->if_nlink;
10157	/*
10158	 * Set the dependencies to busy.
10159	 */
10160	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10161	     adp = TAILQ_NEXT(adp, ad_next)) {
10162#ifdef INVARIANTS
10163		if (deplist != 0 && prevlbn >= adp->ad_offset)
10164			panic("softdep_write_inodeblock: lbn order");
10165		prevlbn = adp->ad_offset;
10166		if (adp->ad_offset < NDADDR &&
10167		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10168			panic("%s: direct pointer #%jd mismatch %d != %jd",
10169			    "softdep_write_inodeblock",
10170			    (intmax_t)adp->ad_offset,
10171			    dp->di_db[adp->ad_offset],
10172			    (intmax_t)adp->ad_newblkno);
10173		if (adp->ad_offset >= NDADDR &&
10174		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10175			panic("%s: indirect pointer #%jd mismatch %d != %jd",
10176			    "softdep_write_inodeblock",
10177			    (intmax_t)adp->ad_offset - NDADDR,
10178			    dp->di_ib[adp->ad_offset - NDADDR],
10179			    (intmax_t)adp->ad_newblkno);
10180		deplist |= 1 << adp->ad_offset;
10181		if ((adp->ad_state & ATTACHED) == 0)
10182			panic("softdep_write_inodeblock: Unknown state 0x%x",
10183			    adp->ad_state);
10184#endif /* INVARIANTS */
10185		adp->ad_state &= ~ATTACHED;
10186		adp->ad_state |= UNDONE;
10187	}
10188	/*
10189	 * The on-disk inode cannot claim to be any larger than the last
10190	 * fragment that has been written. Otherwise, the on-disk inode
10191	 * might have fragments that were not the last block in the file
10192	 * which would corrupt the filesystem.
10193	 */
10194	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10195	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10196		if (adp->ad_offset >= NDADDR)
10197			break;
10198		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10199		/* keep going until hitting a rollback to a frag */
10200		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10201			continue;
10202		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10203		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10204#ifdef INVARIANTS
10205			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10206				panic("softdep_write_inodeblock: lost dep1");
10207#endif /* INVARIANTS */
10208			dp->di_db[i] = 0;
10209		}
10210		for (i = 0; i < NIADDR; i++) {
10211#ifdef INVARIANTS
10212			if (dp->di_ib[i] != 0 &&
10213			    (deplist & ((1 << NDADDR) << i)) == 0)
10214				panic("softdep_write_inodeblock: lost dep2");
10215#endif /* INVARIANTS */
10216			dp->di_ib[i] = 0;
10217		}
10218		return;
10219	}
10220	/*
10221	 * If we have zero'ed out the last allocated block of the file,
10222	 * roll back the size to the last currently allocated block.
10223	 * We know that this last allocated block is a full-sized as
10224	 * we already checked for fragments in the loop above.
10225	 */
10226	if (lastadp != NULL &&
10227	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10228		for (i = lastadp->ad_offset; i >= 0; i--)
10229			if (dp->di_db[i] != 0)
10230				break;
10231		dp->di_size = (i + 1) * fs->fs_bsize;
10232	}
10233	/*
10234	 * The only dependencies are for indirect blocks.
10235	 *
10236	 * The file size for indirect block additions is not guaranteed.
10237	 * Such a guarantee would be non-trivial to achieve. The conventional
10238	 * synchronous write implementation also does not make this guarantee.
10239	 * Fsck should catch and fix discrepancies. Arguably, the file size
10240	 * can be over-estimated without destroying integrity when the file
10241	 * moves into the indirect blocks (i.e., is large). If we want to
10242	 * postpone fsck, we are stuck with this argument.
10243	 */
10244	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10245		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10246}
10247
10248/*
10249 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10250 * Note that any bug fixes made to this routine must be done in the
10251 * version found above.
10252 *
10253 * Called from within the procedure above to deal with unsatisfied
10254 * allocation dependencies in an inodeblock. The buffer must be
10255 * locked, thus, no I/O completion operations can occur while we
10256 * are manipulating its associated dependencies.
10257 */
10258static void
10259initiate_write_inodeblock_ufs2(inodedep, bp)
10260	struct inodedep *inodedep;
10261	struct buf *bp;			/* The inode block */
10262{
10263	struct allocdirect *adp, *lastadp;
10264	struct ufs2_dinode *dp;
10265	struct ufs2_dinode *sip;
10266	struct inoref *inoref;
10267	struct ufsmount *ump;
10268	struct fs *fs;
10269	ufs_lbn_t i;
10270#ifdef INVARIANTS
10271	ufs_lbn_t prevlbn = 0;
10272#endif
10273	int deplist;
10274
10275	if (inodedep->id_state & IOSTARTED)
10276		panic("initiate_write_inodeblock_ufs2: already started");
10277	inodedep->id_state |= IOSTARTED;
10278	fs = inodedep->id_fs;
10279	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10280	LOCK_OWNED(ump);
10281	dp = (struct ufs2_dinode *)bp->b_data +
10282	    ino_to_fsbo(fs, inodedep->id_ino);
10283
10284	/*
10285	 * If we're on the unlinked list but have not yet written our
10286	 * next pointer initialize it here.
10287	 */
10288	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10289		struct inodedep *inon;
10290
10291		inon = TAILQ_NEXT(inodedep, id_unlinked);
10292		dp->di_freelink = inon ? inon->id_ino : 0;
10293	}
10294	/*
10295	 * If the bitmap is not yet written, then the allocated
10296	 * inode cannot be written to disk.
10297	 */
10298	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10299		if (inodedep->id_savedino2 != NULL)
10300			panic("initiate_write_inodeblock_ufs2: I/O underway");
10301		FREE_LOCK(ump);
10302		sip = malloc(sizeof(struct ufs2_dinode),
10303		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10304		ACQUIRE_LOCK(ump);
10305		inodedep->id_savedino2 = sip;
10306		*inodedep->id_savedino2 = *dp;
10307		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10308		dp->di_gen = inodedep->id_savedino2->di_gen;
10309		dp->di_freelink = inodedep->id_savedino2->di_freelink;
10310		return;
10311	}
10312	/*
10313	 * If no dependencies, then there is nothing to roll back.
10314	 */
10315	inodedep->id_savedsize = dp->di_size;
10316	inodedep->id_savedextsize = dp->di_extsize;
10317	inodedep->id_savednlink = dp->di_nlink;
10318	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10319	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
10320	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10321		return;
10322	/*
10323	 * Revert the link count to that of the first unwritten journal entry.
10324	 */
10325	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10326	if (inoref)
10327		dp->di_nlink = inoref->if_nlink;
10328
10329	/*
10330	 * Set the ext data dependencies to busy.
10331	 */
10332	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10333	     adp = TAILQ_NEXT(adp, ad_next)) {
10334#ifdef INVARIANTS
10335		if (deplist != 0 && prevlbn >= adp->ad_offset)
10336			panic("softdep_write_inodeblock: lbn order");
10337		prevlbn = adp->ad_offset;
10338		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10339			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10340			    "softdep_write_inodeblock",
10341			    (intmax_t)adp->ad_offset,
10342			    (intmax_t)dp->di_extb[adp->ad_offset],
10343			    (intmax_t)adp->ad_newblkno);
10344		deplist |= 1 << adp->ad_offset;
10345		if ((adp->ad_state & ATTACHED) == 0)
10346			panic("softdep_write_inodeblock: Unknown state 0x%x",
10347			    adp->ad_state);
10348#endif /* INVARIANTS */
10349		adp->ad_state &= ~ATTACHED;
10350		adp->ad_state |= UNDONE;
10351	}
10352	/*
10353	 * The on-disk inode cannot claim to be any larger than the last
10354	 * fragment that has been written. Otherwise, the on-disk inode
10355	 * might have fragments that were not the last block in the ext
10356	 * data which would corrupt the filesystem.
10357	 */
10358	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10359	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10360		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10361		/* keep going until hitting a rollback to a frag */
10362		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10363			continue;
10364		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10365		for (i = adp->ad_offset + 1; i < NXADDR; i++) {
10366#ifdef INVARIANTS
10367			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10368				panic("softdep_write_inodeblock: lost dep1");
10369#endif /* INVARIANTS */
10370			dp->di_extb[i] = 0;
10371		}
10372		lastadp = NULL;
10373		break;
10374	}
10375	/*
10376	 * If we have zero'ed out the last allocated block of the ext
10377	 * data, roll back the size to the last currently allocated block.
10378	 * We know that this last allocated block is a full-sized as
10379	 * we already checked for fragments in the loop above.
10380	 */
10381	if (lastadp != NULL &&
10382	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10383		for (i = lastadp->ad_offset; i >= 0; i--)
10384			if (dp->di_extb[i] != 0)
10385				break;
10386		dp->di_extsize = (i + 1) * fs->fs_bsize;
10387	}
10388	/*
10389	 * Set the file data dependencies to busy.
10390	 */
10391	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10392	     adp = TAILQ_NEXT(adp, ad_next)) {
10393#ifdef INVARIANTS
10394		if (deplist != 0 && prevlbn >= adp->ad_offset)
10395			panic("softdep_write_inodeblock: lbn order");
10396		if ((adp->ad_state & ATTACHED) == 0)
10397			panic("inodedep %p and adp %p not attached", inodedep, adp);
10398		prevlbn = adp->ad_offset;
10399		if (adp->ad_offset < NDADDR &&
10400		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10401			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10402			    "softdep_write_inodeblock",
10403			    (intmax_t)adp->ad_offset,
10404			    (intmax_t)dp->di_db[adp->ad_offset],
10405			    (intmax_t)adp->ad_newblkno);
10406		if (adp->ad_offset >= NDADDR &&
10407		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10408			panic("%s indirect pointer #%jd mismatch %jd != %jd",
10409			    "softdep_write_inodeblock:",
10410			    (intmax_t)adp->ad_offset - NDADDR,
10411			    (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
10412			    (intmax_t)adp->ad_newblkno);
10413		deplist |= 1 << adp->ad_offset;
10414		if ((adp->ad_state & ATTACHED) == 0)
10415			panic("softdep_write_inodeblock: Unknown state 0x%x",
10416			    adp->ad_state);
10417#endif /* INVARIANTS */
10418		adp->ad_state &= ~ATTACHED;
10419		adp->ad_state |= UNDONE;
10420	}
10421	/*
10422	 * The on-disk inode cannot claim to be any larger than the last
10423	 * fragment that has been written. Otherwise, the on-disk inode
10424	 * might have fragments that were not the last block in the file
10425	 * which would corrupt the filesystem.
10426	 */
10427	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10428	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10429		if (adp->ad_offset >= NDADDR)
10430			break;
10431		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10432		/* keep going until hitting a rollback to a frag */
10433		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10434			continue;
10435		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10436		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10437#ifdef INVARIANTS
10438			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10439				panic("softdep_write_inodeblock: lost dep2");
10440#endif /* INVARIANTS */
10441			dp->di_db[i] = 0;
10442		}
10443		for (i = 0; i < NIADDR; i++) {
10444#ifdef INVARIANTS
10445			if (dp->di_ib[i] != 0 &&
10446			    (deplist & ((1 << NDADDR) << i)) == 0)
10447				panic("softdep_write_inodeblock: lost dep3");
10448#endif /* INVARIANTS */
10449			dp->di_ib[i] = 0;
10450		}
10451		return;
10452	}
10453	/*
10454	 * If we have zero'ed out the last allocated block of the file,
10455	 * roll back the size to the last currently allocated block.
10456	 * We know that this last allocated block is a full-sized as
10457	 * we already checked for fragments in the loop above.
10458	 */
10459	if (lastadp != NULL &&
10460	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10461		for (i = lastadp->ad_offset; i >= 0; i--)
10462			if (dp->di_db[i] != 0)
10463				break;
10464		dp->di_size = (i + 1) * fs->fs_bsize;
10465	}
10466	/*
10467	 * The only dependencies are for indirect blocks.
10468	 *
10469	 * The file size for indirect block additions is not guaranteed.
10470	 * Such a guarantee would be non-trivial to achieve. The conventional
10471	 * synchronous write implementation also does not make this guarantee.
10472	 * Fsck should catch and fix discrepancies. Arguably, the file size
10473	 * can be over-estimated without destroying integrity when the file
10474	 * moves into the indirect blocks (i.e., is large). If we want to
10475	 * postpone fsck, we are stuck with this argument.
10476	 */
10477	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10478		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10479}
10480
10481/*
10482 * Cancel an indirdep as a result of truncation.  Release all of the
10483 * children allocindirs and place their journal work on the appropriate
10484 * list.
10485 */
10486static void
10487cancel_indirdep(indirdep, bp, freeblks)
10488	struct indirdep *indirdep;
10489	struct buf *bp;
10490	struct freeblks *freeblks;
10491{
10492	struct allocindir *aip;
10493
10494	/*
10495	 * None of the indirect pointers will ever be visible,
10496	 * so they can simply be tossed. GOINGAWAY ensures
10497	 * that allocated pointers will be saved in the buffer
10498	 * cache until they are freed. Note that they will
10499	 * only be able to be found by their physical address
10500	 * since the inode mapping the logical address will
10501	 * be gone. The save buffer used for the safe copy
10502	 * was allocated in setup_allocindir_phase2 using
10503	 * the physical address so it could be used for this
10504	 * purpose. Hence we swap the safe copy with the real
10505	 * copy, allowing the safe copy to be freed and holding
10506	 * on to the real copy for later use in indir_trunc.
10507	 */
10508	if (indirdep->ir_state & GOINGAWAY)
10509		panic("cancel_indirdep: already gone");
10510	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10511		indirdep->ir_state |= DEPCOMPLETE;
10512		LIST_REMOVE(indirdep, ir_next);
10513	}
10514	indirdep->ir_state |= GOINGAWAY;
10515	/*
10516	 * Pass in bp for blocks still have journal writes
10517	 * pending so we can cancel them on their own.
10518	 */
10519	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
10520		cancel_allocindir(aip, bp, freeblks, 0);
10521	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0)
10522		cancel_allocindir(aip, NULL, freeblks, 0);
10523	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0)
10524		cancel_allocindir(aip, NULL, freeblks, 0);
10525	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != 0)
10526		cancel_allocindir(aip, NULL, freeblks, 0);
10527	/*
10528	 * If there are pending partial truncations we need to keep the
10529	 * old block copy around until they complete.  This is because
10530	 * the current b_data is not a perfect superset of the available
10531	 * blocks.
10532	 */
10533	if (TAILQ_EMPTY(&indirdep->ir_trunc))
10534		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10535	else
10536		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10537	WORKLIST_REMOVE(&indirdep->ir_list);
10538	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10539	indirdep->ir_bp = NULL;
10540	indirdep->ir_freeblks = freeblks;
10541}
10542
10543/*
10544 * Free an indirdep once it no longer has new pointers to track.
10545 */
10546static void
10547free_indirdep(indirdep)
10548	struct indirdep *indirdep;
10549{
10550
10551	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10552	    ("free_indirdep: Indir trunc list not empty."));
10553	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10554	    ("free_indirdep: Complete head not empty."));
10555	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10556	    ("free_indirdep: write head not empty."));
10557	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10558	    ("free_indirdep: done head not empty."));
10559	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10560	    ("free_indirdep: deplist head not empty."));
10561	KASSERT((indirdep->ir_state & DEPCOMPLETE),
10562	    ("free_indirdep: %p still on newblk list.", indirdep));
10563	KASSERT(indirdep->ir_saveddata == NULL,
10564	    ("free_indirdep: %p still has saved data.", indirdep));
10565	if (indirdep->ir_state & ONWORKLIST)
10566		WORKLIST_REMOVE(&indirdep->ir_list);
10567	WORKITEM_FREE(indirdep, D_INDIRDEP);
10568}
10569
10570/*
10571 * Called before a write to an indirdep.  This routine is responsible for
10572 * rolling back pointers to a safe state which includes only those
10573 * allocindirs which have been completed.
10574 */
10575static void
10576initiate_write_indirdep(indirdep, bp)
10577	struct indirdep *indirdep;
10578	struct buf *bp;
10579{
10580	struct ufsmount *ump;
10581
10582	indirdep->ir_state |= IOSTARTED;
10583	if (indirdep->ir_state & GOINGAWAY)
10584		panic("disk_io_initiation: indirdep gone");
10585	/*
10586	 * If there are no remaining dependencies, this will be writing
10587	 * the real pointers.
10588	 */
10589	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10590	    TAILQ_EMPTY(&indirdep->ir_trunc))
10591		return;
10592	/*
10593	 * Replace up-to-date version with safe version.
10594	 */
10595	if (indirdep->ir_saveddata == NULL) {
10596		ump = VFSTOUFS(indirdep->ir_list.wk_mp);
10597		LOCK_OWNED(ump);
10598		FREE_LOCK(ump);
10599		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10600		    M_SOFTDEP_FLAGS);
10601		ACQUIRE_LOCK(ump);
10602	}
10603	indirdep->ir_state &= ~ATTACHED;
10604	indirdep->ir_state |= UNDONE;
10605	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10606	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10607	    bp->b_bcount);
10608}
10609
10610/*
10611 * Called when an inode has been cleared in a cg bitmap.  This finally
10612 * eliminates any canceled jaddrefs
10613 */
10614void
10615softdep_setup_inofree(mp, bp, ino, wkhd)
10616	struct mount *mp;
10617	struct buf *bp;
10618	ino_t ino;
10619	struct workhead *wkhd;
10620{
10621	struct worklist *wk, *wkn;
10622	struct inodedep *inodedep;
10623	struct ufsmount *ump;
10624	uint8_t *inosused;
10625	struct cg *cgp;
10626	struct fs *fs;
10627
10628	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
10629	    ("softdep_setup_inofree called on non-softdep filesystem"));
10630	ump = VFSTOUFS(mp);
10631	ACQUIRE_LOCK(ump);
10632	fs = ump->um_fs;
10633	cgp = (struct cg *)bp->b_data;
10634	inosused = cg_inosused(cgp);
10635	if (isset(inosused, ino % fs->fs_ipg))
10636		panic("softdep_setup_inofree: inode %ju not freed.",
10637		    (uintmax_t)ino);
10638	if (inodedep_lookup(mp, ino, 0, &inodedep))
10639		panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
10640		    (uintmax_t)ino, inodedep);
10641	if (wkhd) {
10642		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10643			if (wk->wk_type != D_JADDREF)
10644				continue;
10645			WORKLIST_REMOVE(wk);
10646			/*
10647			 * We can free immediately even if the jaddref
10648			 * isn't attached in a background write as now
10649			 * the bitmaps are reconciled.
10650			 */
10651			wk->wk_state |= COMPLETE | ATTACHED;
10652			free_jaddref(WK_JADDREF(wk));
10653		}
10654		jwork_move(&bp->b_dep, wkhd);
10655	}
10656	FREE_LOCK(ump);
10657}
10658
10659
10660/*
10661 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10662 * map.  Any dependencies waiting for the write to clear are added to the
10663 * buf's list and any jnewblks that are being canceled are discarded
10664 * immediately.
10665 */
10666void
10667softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
10668	struct mount *mp;
10669	struct buf *bp;
10670	ufs2_daddr_t blkno;
10671	int frags;
10672	struct workhead *wkhd;
10673{
10674	struct bmsafemap *bmsafemap;
10675	struct jnewblk *jnewblk;
10676	struct ufsmount *ump;
10677	struct worklist *wk;
10678	struct fs *fs;
10679#ifdef SUJ_DEBUG
10680	uint8_t *blksfree;
10681	struct cg *cgp;
10682	ufs2_daddr_t jstart;
10683	ufs2_daddr_t jend;
10684	ufs2_daddr_t end;
10685	long bno;
10686	int i;
10687#endif
10688
10689	CTR3(KTR_SUJ,
10690	    "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10691	    blkno, frags, wkhd);
10692
10693	ump = VFSTOUFS(mp);
10694	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
10695	    ("softdep_setup_blkfree called on non-softdep filesystem"));
10696	ACQUIRE_LOCK(ump);
10697	/* Lookup the bmsafemap so we track when it is dirty. */
10698	fs = ump->um_fs;
10699	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10700	/*
10701	 * Detach any jnewblks which have been canceled.  They must linger
10702	 * until the bitmap is cleared again by ffs_blkfree() to prevent
10703	 * an unjournaled allocation from hitting the disk.
10704	 */
10705	if (wkhd) {
10706		while ((wk = LIST_FIRST(wkhd)) != NULL) {
10707			CTR2(KTR_SUJ,
10708			    "softdep_setup_blkfree: blkno %jd wk type %d",
10709			    blkno, wk->wk_type);
10710			WORKLIST_REMOVE(wk);
10711			if (wk->wk_type != D_JNEWBLK) {
10712				WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
10713				continue;
10714			}
10715			jnewblk = WK_JNEWBLK(wk);
10716			KASSERT(jnewblk->jn_state & GOINGAWAY,
10717			    ("softdep_setup_blkfree: jnewblk not canceled."));
10718#ifdef SUJ_DEBUG
10719			/*
10720			 * Assert that this block is free in the bitmap
10721			 * before we discard the jnewblk.
10722			 */
10723			cgp = (struct cg *)bp->b_data;
10724			blksfree = cg_blksfree(cgp);
10725			bno = dtogd(fs, jnewblk->jn_blkno);
10726			for (i = jnewblk->jn_oldfrags;
10727			    i < jnewblk->jn_frags; i++) {
10728				if (isset(blksfree, bno + i))
10729					continue;
10730				panic("softdep_setup_blkfree: not free");
10731			}
10732#endif
10733			/*
10734			 * Even if it's not attached we can free immediately
10735			 * as the new bitmap is correct.
10736			 */
10737			wk->wk_state |= COMPLETE | ATTACHED;
10738			free_jnewblk(jnewblk);
10739		}
10740	}
10741
10742#ifdef SUJ_DEBUG
10743	/*
10744	 * Assert that we are not freeing a block which has an outstanding
10745	 * allocation dependency.
10746	 */
10747	fs = VFSTOUFS(mp)->um_fs;
10748	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10749	end = blkno + frags;
10750	LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10751		/*
10752		 * Don't match against blocks that will be freed when the
10753		 * background write is done.
10754		 */
10755		if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
10756		    (COMPLETE | DEPCOMPLETE))
10757			continue;
10758		jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
10759		jend = jnewblk->jn_blkno + jnewblk->jn_frags;
10760		if ((blkno >= jstart && blkno < jend) ||
10761		    (end > jstart && end <= jend)) {
10762			printf("state 0x%X %jd - %d %d dep %p\n",
10763			    jnewblk->jn_state, jnewblk->jn_blkno,
10764			    jnewblk->jn_oldfrags, jnewblk->jn_frags,
10765			    jnewblk->jn_dep);
10766			panic("softdep_setup_blkfree: "
10767			    "%jd-%jd(%d) overlaps with %jd-%jd",
10768			    blkno, end, frags, jstart, jend);
10769		}
10770	}
10771#endif
10772	FREE_LOCK(ump);
10773}
10774
10775/*
10776 * Revert a block allocation when the journal record that describes it
10777 * is not yet written.
10778 */
10779static int
10780jnewblk_rollback(jnewblk, fs, cgp, blksfree)
10781	struct jnewblk *jnewblk;
10782	struct fs *fs;
10783	struct cg *cgp;
10784	uint8_t *blksfree;
10785{
10786	ufs1_daddr_t fragno;
10787	long cgbno, bbase;
10788	int frags, blk;
10789	int i;
10790
10791	frags = 0;
10792	cgbno = dtogd(fs, jnewblk->jn_blkno);
10793	/*
10794	 * We have to test which frags need to be rolled back.  We may
10795	 * be operating on a stale copy when doing background writes.
10796	 */
10797	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
10798		if (isclr(blksfree, cgbno + i))
10799			frags++;
10800	if (frags == 0)
10801		return (0);
10802	/*
10803	 * This is mostly ffs_blkfree() sans some validation and
10804	 * superblock updates.
10805	 */
10806	if (frags == fs->fs_frag) {
10807		fragno = fragstoblks(fs, cgbno);
10808		ffs_setblock(fs, blksfree, fragno);
10809		ffs_clusteracct(fs, cgp, fragno, 1);
10810		cgp->cg_cs.cs_nbfree++;
10811	} else {
10812		cgbno += jnewblk->jn_oldfrags;
10813		bbase = cgbno - fragnum(fs, cgbno);
10814		/* Decrement the old frags.  */
10815		blk = blkmap(fs, blksfree, bbase);
10816		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
10817		/* Deallocate the fragment */
10818		for (i = 0; i < frags; i++)
10819			setbit(blksfree, cgbno + i);
10820		cgp->cg_cs.cs_nffree += frags;
10821		/* Add back in counts associated with the new frags */
10822		blk = blkmap(fs, blksfree, bbase);
10823		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
10824		/* If a complete block has been reassembled, account for it. */
10825		fragno = fragstoblks(fs, bbase);
10826		if (ffs_isblock(fs, blksfree, fragno)) {
10827			cgp->cg_cs.cs_nffree -= fs->fs_frag;
10828			ffs_clusteracct(fs, cgp, fragno, 1);
10829			cgp->cg_cs.cs_nbfree++;
10830		}
10831	}
10832	stat_jnewblk++;
10833	jnewblk->jn_state &= ~ATTACHED;
10834	jnewblk->jn_state |= UNDONE;
10835
10836	return (frags);
10837}
10838
10839static void
10840initiate_write_bmsafemap(bmsafemap, bp)
10841	struct bmsafemap *bmsafemap;
10842	struct buf *bp;			/* The cg block. */
10843{
10844	struct jaddref *jaddref;
10845	struct jnewblk *jnewblk;
10846	uint8_t *inosused;
10847	uint8_t *blksfree;
10848	struct cg *cgp;
10849	struct fs *fs;
10850	ino_t ino;
10851
10852	if (bmsafemap->sm_state & IOSTARTED)
10853		return;
10854	bmsafemap->sm_state |= IOSTARTED;
10855	/*
10856	 * Clear any inode allocations which are pending journal writes.
10857	 */
10858	if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
10859		cgp = (struct cg *)bp->b_data;
10860		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10861		inosused = cg_inosused(cgp);
10862		LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
10863			ino = jaddref->ja_ino % fs->fs_ipg;
10864			if (isset(inosused, ino)) {
10865				if ((jaddref->ja_mode & IFMT) == IFDIR)
10866					cgp->cg_cs.cs_ndir--;
10867				cgp->cg_cs.cs_nifree++;
10868				clrbit(inosused, ino);
10869				jaddref->ja_state &= ~ATTACHED;
10870				jaddref->ja_state |= UNDONE;
10871				stat_jaddref++;
10872			} else
10873				panic("initiate_write_bmsafemap: inode %ju "
10874				    "marked free", (uintmax_t)jaddref->ja_ino);
10875		}
10876	}
10877	/*
10878	 * Clear any block allocations which are pending journal writes.
10879	 */
10880	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
10881		cgp = (struct cg *)bp->b_data;
10882		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10883		blksfree = cg_blksfree(cgp);
10884		LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10885			if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
10886				continue;
10887			panic("initiate_write_bmsafemap: block %jd "
10888			    "marked free", jnewblk->jn_blkno);
10889		}
10890	}
10891	/*
10892	 * Move allocation lists to the written lists so they can be
10893	 * cleared once the block write is complete.
10894	 */
10895	LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
10896	    inodedep, id_deps);
10897	LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
10898	    newblk, nb_deps);
10899	LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
10900	    wk_list);
10901}
10902
10903/*
10904 * This routine is called during the completion interrupt
10905 * service routine for a disk write (from the procedure called
10906 * by the device driver to inform the filesystem caches of
10907 * a request completion).  It should be called early in this
10908 * procedure, before the block is made available to other
10909 * processes or other routines are called.
10910 *
10911 */
10912static void
10913softdep_disk_write_complete(bp)
10914	struct buf *bp;		/* describes the completed disk write */
10915{
10916	struct worklist *wk;
10917	struct worklist *owk;
10918	struct ufsmount *ump;
10919	struct workhead reattach;
10920	struct freeblks *freeblks;
10921	struct buf *sbp;
10922
10923	/*
10924	 * If an error occurred while doing the write, then the data
10925	 * has not hit the disk and the dependencies cannot be unrolled.
10926	 */
10927	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
10928		return;
10929	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
10930		return;
10931	ump = VFSTOUFS(wk->wk_mp);
10932	LIST_INIT(&reattach);
10933	/*
10934	 * This lock must not be released anywhere in this code segment.
10935	 */
10936	sbp = NULL;
10937	owk = NULL;
10938	ACQUIRE_LOCK(ump);
10939	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
10940		WORKLIST_REMOVE(wk);
10941		atomic_add_long(&dep_write[wk->wk_type], 1);
10942		if (wk == owk)
10943			panic("duplicate worklist: %p\n", wk);
10944		owk = wk;
10945		switch (wk->wk_type) {
10946
10947		case D_PAGEDEP:
10948			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
10949				WORKLIST_INSERT(&reattach, wk);
10950			continue;
10951
10952		case D_INODEDEP:
10953			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
10954				WORKLIST_INSERT(&reattach, wk);
10955			continue;
10956
10957		case D_BMSAFEMAP:
10958			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
10959				WORKLIST_INSERT(&reattach, wk);
10960			continue;
10961
10962		case D_MKDIR:
10963			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
10964			continue;
10965
10966		case D_ALLOCDIRECT:
10967			wk->wk_state |= COMPLETE;
10968			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
10969			continue;
10970
10971		case D_ALLOCINDIR:
10972			wk->wk_state |= COMPLETE;
10973			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
10974			continue;
10975
10976		case D_INDIRDEP:
10977			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
10978				WORKLIST_INSERT(&reattach, wk);
10979			continue;
10980
10981		case D_FREEBLKS:
10982			wk->wk_state |= COMPLETE;
10983			freeblks = WK_FREEBLKS(wk);
10984			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
10985			    LIST_EMPTY(&freeblks->fb_jblkdephd))
10986				add_to_worklist(wk, WK_NODELAY);
10987			continue;
10988
10989		case D_FREEWORK:
10990			handle_written_freework(WK_FREEWORK(wk));
10991			break;
10992
10993		case D_JSEGDEP:
10994			free_jsegdep(WK_JSEGDEP(wk));
10995			continue;
10996
10997		case D_JSEG:
10998			handle_written_jseg(WK_JSEG(wk), bp);
10999			continue;
11000
11001		case D_SBDEP:
11002			if (handle_written_sbdep(WK_SBDEP(wk), bp))
11003				WORKLIST_INSERT(&reattach, wk);
11004			continue;
11005
11006		case D_FREEDEP:
11007			free_freedep(WK_FREEDEP(wk));
11008			continue;
11009
11010		default:
11011			panic("handle_disk_write_complete: Unknown type %s",
11012			    TYPENAME(wk->wk_type));
11013			/* NOTREACHED */
11014		}
11015	}
11016	/*
11017	 * Reattach any requests that must be redone.
11018	 */
11019	while ((wk = LIST_FIRST(&reattach)) != NULL) {
11020		WORKLIST_REMOVE(wk);
11021		WORKLIST_INSERT(&bp->b_dep, wk);
11022	}
11023	FREE_LOCK(ump);
11024	if (sbp)
11025		brelse(sbp);
11026}
11027
11028/*
11029 * Called from within softdep_disk_write_complete above. Note that
11030 * this routine is always called from interrupt level with further
11031 * splbio interrupts blocked.
11032 */
11033static void
11034handle_allocdirect_partdone(adp, wkhd)
11035	struct allocdirect *adp;	/* the completed allocdirect */
11036	struct workhead *wkhd;		/* Work to do when inode is writtne. */
11037{
11038	struct allocdirectlst *listhead;
11039	struct allocdirect *listadp;
11040	struct inodedep *inodedep;
11041	long bsize;
11042
11043	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11044		return;
11045	/*
11046	 * The on-disk inode cannot claim to be any larger than the last
11047	 * fragment that has been written. Otherwise, the on-disk inode
11048	 * might have fragments that were not the last block in the file
11049	 * which would corrupt the filesystem. Thus, we cannot free any
11050	 * allocdirects after one whose ad_oldblkno claims a fragment as
11051	 * these blocks must be rolled back to zero before writing the inode.
11052	 * We check the currently active set of allocdirects in id_inoupdt
11053	 * or id_extupdt as appropriate.
11054	 */
11055	inodedep = adp->ad_inodedep;
11056	bsize = inodedep->id_fs->fs_bsize;
11057	if (adp->ad_state & EXTDATA)
11058		listhead = &inodedep->id_extupdt;
11059	else
11060		listhead = &inodedep->id_inoupdt;
11061	TAILQ_FOREACH(listadp, listhead, ad_next) {
11062		/* found our block */
11063		if (listadp == adp)
11064			break;
11065		/* continue if ad_oldlbn is not a fragment */
11066		if (listadp->ad_oldsize == 0 ||
11067		    listadp->ad_oldsize == bsize)
11068			continue;
11069		/* hit a fragment */
11070		return;
11071	}
11072	/*
11073	 * If we have reached the end of the current list without
11074	 * finding the just finished dependency, then it must be
11075	 * on the future dependency list. Future dependencies cannot
11076	 * be freed until they are moved to the current list.
11077	 */
11078	if (listadp == NULL) {
11079#ifdef DEBUG
11080		if (adp->ad_state & EXTDATA)
11081			listhead = &inodedep->id_newextupdt;
11082		else
11083			listhead = &inodedep->id_newinoupdt;
11084		TAILQ_FOREACH(listadp, listhead, ad_next)
11085			/* found our block */
11086			if (listadp == adp)
11087				break;
11088		if (listadp == NULL)
11089			panic("handle_allocdirect_partdone: lost dep");
11090#endif /* DEBUG */
11091		return;
11092	}
11093	/*
11094	 * If we have found the just finished dependency, then queue
11095	 * it along with anything that follows it that is complete.
11096	 * Since the pointer has not yet been written in the inode
11097	 * as the dependency prevents it, place the allocdirect on the
11098	 * bufwait list where it will be freed once the pointer is
11099	 * valid.
11100	 */
11101	if (wkhd == NULL)
11102		wkhd = &inodedep->id_bufwait;
11103	for (; adp; adp = listadp) {
11104		listadp = TAILQ_NEXT(adp, ad_next);
11105		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11106			return;
11107		TAILQ_REMOVE(listhead, adp, ad_next);
11108		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
11109	}
11110}
11111
11112/*
11113 * Called from within softdep_disk_write_complete above.  This routine
11114 * completes successfully written allocindirs.
11115 */
11116static void
11117handle_allocindir_partdone(aip)
11118	struct allocindir *aip;		/* the completed allocindir */
11119{
11120	struct indirdep *indirdep;
11121
11122	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
11123		return;
11124	indirdep = aip->ai_indirdep;
11125	LIST_REMOVE(aip, ai_next);
11126	/*
11127	 * Don't set a pointer while the buffer is undergoing IO or while
11128	 * we have active truncations.
11129	 */
11130	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
11131		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
11132		return;
11133	}
11134	if (indirdep->ir_state & UFS1FMT)
11135		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11136		    aip->ai_newblkno;
11137	else
11138		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11139		    aip->ai_newblkno;
11140	/*
11141	 * Await the pointer write before freeing the allocindir.
11142	 */
11143	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
11144}
11145
11146/*
11147 * Release segments held on a jwork list.
11148 */
11149static void
11150handle_jwork(wkhd)
11151	struct workhead *wkhd;
11152{
11153	struct worklist *wk;
11154
11155	while ((wk = LIST_FIRST(wkhd)) != NULL) {
11156		WORKLIST_REMOVE(wk);
11157		switch (wk->wk_type) {
11158		case D_JSEGDEP:
11159			free_jsegdep(WK_JSEGDEP(wk));
11160			continue;
11161		case D_FREEDEP:
11162			free_freedep(WK_FREEDEP(wk));
11163			continue;
11164		case D_FREEFRAG:
11165			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
11166			WORKITEM_FREE(wk, D_FREEFRAG);
11167			continue;
11168		case D_FREEWORK:
11169			handle_written_freework(WK_FREEWORK(wk));
11170			continue;
11171		default:
11172			panic("handle_jwork: Unknown type %s\n",
11173			    TYPENAME(wk->wk_type));
11174		}
11175	}
11176}
11177
11178/*
11179 * Handle the bufwait list on an inode when it is safe to release items
11180 * held there.  This normally happens after an inode block is written but
11181 * may be delayed and handled later if there are pending journal items that
11182 * are not yet safe to be released.
11183 */
11184static struct freefile *
11185handle_bufwait(inodedep, refhd)
11186	struct inodedep *inodedep;
11187	struct workhead *refhd;
11188{
11189	struct jaddref *jaddref;
11190	struct freefile *freefile;
11191	struct worklist *wk;
11192
11193	freefile = NULL;
11194	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
11195		WORKLIST_REMOVE(wk);
11196		switch (wk->wk_type) {
11197		case D_FREEFILE:
11198			/*
11199			 * We defer adding freefile to the worklist
11200			 * until all other additions have been made to
11201			 * ensure that it will be done after all the
11202			 * old blocks have been freed.
11203			 */
11204			if (freefile != NULL)
11205				panic("handle_bufwait: freefile");
11206			freefile = WK_FREEFILE(wk);
11207			continue;
11208
11209		case D_MKDIR:
11210			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
11211			continue;
11212
11213		case D_DIRADD:
11214			diradd_inode_written(WK_DIRADD(wk), inodedep);
11215			continue;
11216
11217		case D_FREEFRAG:
11218			wk->wk_state |= COMPLETE;
11219			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
11220				add_to_worklist(wk, 0);
11221			continue;
11222
11223		case D_DIRREM:
11224			wk->wk_state |= COMPLETE;
11225			add_to_worklist(wk, 0);
11226			continue;
11227
11228		case D_ALLOCDIRECT:
11229		case D_ALLOCINDIR:
11230			free_newblk(WK_NEWBLK(wk));
11231			continue;
11232
11233		case D_JNEWBLK:
11234			wk->wk_state |= COMPLETE;
11235			free_jnewblk(WK_JNEWBLK(wk));
11236			continue;
11237
11238		/*
11239		 * Save freed journal segments and add references on
11240		 * the supplied list which will delay their release
11241		 * until the cg bitmap is cleared on disk.
11242		 */
11243		case D_JSEGDEP:
11244			if (refhd == NULL)
11245				free_jsegdep(WK_JSEGDEP(wk));
11246			else
11247				WORKLIST_INSERT(refhd, wk);
11248			continue;
11249
11250		case D_JADDREF:
11251			jaddref = WK_JADDREF(wk);
11252			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11253			    if_deps);
11254			/*
11255			 * Transfer any jaddrefs to the list to be freed with
11256			 * the bitmap if we're handling a removed file.
11257			 */
11258			if (refhd == NULL) {
11259				wk->wk_state |= COMPLETE;
11260				free_jaddref(jaddref);
11261			} else
11262				WORKLIST_INSERT(refhd, wk);
11263			continue;
11264
11265		default:
11266			panic("handle_bufwait: Unknown type %p(%s)",
11267			    wk, TYPENAME(wk->wk_type));
11268			/* NOTREACHED */
11269		}
11270	}
11271	return (freefile);
11272}
11273/*
11274 * Called from within softdep_disk_write_complete above to restore
11275 * in-memory inode block contents to their most up-to-date state. Note
11276 * that this routine is always called from interrupt level with further
11277 * splbio interrupts blocked.
11278 */
11279static int
11280handle_written_inodeblock(inodedep, bp)
11281	struct inodedep *inodedep;
11282	struct buf *bp;		/* buffer containing the inode block */
11283{
11284	struct freefile *freefile;
11285	struct allocdirect *adp, *nextadp;
11286	struct ufs1_dinode *dp1 = NULL;
11287	struct ufs2_dinode *dp2 = NULL;
11288	struct workhead wkhd;
11289	int hadchanges, fstype;
11290	ino_t freelink;
11291
11292	LIST_INIT(&wkhd);
11293	hadchanges = 0;
11294	freefile = NULL;
11295	if ((inodedep->id_state & IOSTARTED) == 0)
11296		panic("handle_written_inodeblock: not started");
11297	inodedep->id_state &= ~IOSTARTED;
11298	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11299		fstype = UFS1;
11300		dp1 = (struct ufs1_dinode *)bp->b_data +
11301		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11302		freelink = dp1->di_freelink;
11303	} else {
11304		fstype = UFS2;
11305		dp2 = (struct ufs2_dinode *)bp->b_data +
11306		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11307		freelink = dp2->di_freelink;
11308	}
11309	/*
11310	 * Leave this inodeblock dirty until it's in the list.
11311	 */
11312	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
11313		struct inodedep *inon;
11314
11315		inon = TAILQ_NEXT(inodedep, id_unlinked);
11316		if ((inon == NULL && freelink == 0) ||
11317		    (inon && inon->id_ino == freelink)) {
11318			if (inon)
11319				inon->id_state |= UNLINKPREV;
11320			inodedep->id_state |= UNLINKNEXT;
11321		}
11322		hadchanges = 1;
11323	}
11324	/*
11325	 * If we had to rollback the inode allocation because of
11326	 * bitmaps being incomplete, then simply restore it.
11327	 * Keep the block dirty so that it will not be reclaimed until
11328	 * all associated dependencies have been cleared and the
11329	 * corresponding updates written to disk.
11330	 */
11331	if (inodedep->id_savedino1 != NULL) {
11332		hadchanges = 1;
11333		if (fstype == UFS1)
11334			*dp1 = *inodedep->id_savedino1;
11335		else
11336			*dp2 = *inodedep->id_savedino2;
11337		free(inodedep->id_savedino1, M_SAVEDINO);
11338		inodedep->id_savedino1 = NULL;
11339		if ((bp->b_flags & B_DELWRI) == 0)
11340			stat_inode_bitmap++;
11341		bdirty(bp);
11342		/*
11343		 * If the inode is clear here and GOINGAWAY it will never
11344		 * be written.  Process the bufwait and clear any pending
11345		 * work which may include the freefile.
11346		 */
11347		if (inodedep->id_state & GOINGAWAY)
11348			goto bufwait;
11349		return (1);
11350	}
11351	inodedep->id_state |= COMPLETE;
11352	/*
11353	 * Roll forward anything that had to be rolled back before
11354	 * the inode could be updated.
11355	 */
11356	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11357		nextadp = TAILQ_NEXT(adp, ad_next);
11358		if (adp->ad_state & ATTACHED)
11359			panic("handle_written_inodeblock: new entry");
11360		if (fstype == UFS1) {
11361			if (adp->ad_offset < NDADDR) {
11362				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11363					panic("%s %s #%jd mismatch %d != %jd",
11364					    "handle_written_inodeblock:",
11365					    "direct pointer",
11366					    (intmax_t)adp->ad_offset,
11367					    dp1->di_db[adp->ad_offset],
11368					    (intmax_t)adp->ad_oldblkno);
11369				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11370			} else {
11371				if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
11372					panic("%s: %s #%jd allocated as %d",
11373					    "handle_written_inodeblock",
11374					    "indirect pointer",
11375					    (intmax_t)adp->ad_offset - NDADDR,
11376					    dp1->di_ib[adp->ad_offset - NDADDR]);
11377				dp1->di_ib[adp->ad_offset - NDADDR] =
11378				    adp->ad_newblkno;
11379			}
11380		} else {
11381			if (adp->ad_offset < NDADDR) {
11382				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11383					panic("%s: %s #%jd %s %jd != %jd",
11384					    "handle_written_inodeblock",
11385					    "direct pointer",
11386					    (intmax_t)adp->ad_offset, "mismatch",
11387					    (intmax_t)dp2->di_db[adp->ad_offset],
11388					    (intmax_t)adp->ad_oldblkno);
11389				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11390			} else {
11391				if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
11392					panic("%s: %s #%jd allocated as %jd",
11393					    "handle_written_inodeblock",
11394					    "indirect pointer",
11395					    (intmax_t)adp->ad_offset - NDADDR,
11396					    (intmax_t)
11397					    dp2->di_ib[adp->ad_offset - NDADDR]);
11398				dp2->di_ib[adp->ad_offset - NDADDR] =
11399				    adp->ad_newblkno;
11400			}
11401		}
11402		adp->ad_state &= ~UNDONE;
11403		adp->ad_state |= ATTACHED;
11404		hadchanges = 1;
11405	}
11406	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11407		nextadp = TAILQ_NEXT(adp, ad_next);
11408		if (adp->ad_state & ATTACHED)
11409			panic("handle_written_inodeblock: new entry");
11410		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11411			panic("%s: direct pointers #%jd %s %jd != %jd",
11412			    "handle_written_inodeblock",
11413			    (intmax_t)adp->ad_offset, "mismatch",
11414			    (intmax_t)dp2->di_extb[adp->ad_offset],
11415			    (intmax_t)adp->ad_oldblkno);
11416		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11417		adp->ad_state &= ~UNDONE;
11418		adp->ad_state |= ATTACHED;
11419		hadchanges = 1;
11420	}
11421	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11422		stat_direct_blk_ptrs++;
11423	/*
11424	 * Reset the file size to its most up-to-date value.
11425	 */
11426	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11427		panic("handle_written_inodeblock: bad size");
11428	if (inodedep->id_savednlink > LINK_MAX)
11429		panic("handle_written_inodeblock: Invalid link count "
11430		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
11431	if (fstype == UFS1) {
11432		if (dp1->di_nlink != inodedep->id_savednlink) {
11433			dp1->di_nlink = inodedep->id_savednlink;
11434			hadchanges = 1;
11435		}
11436		if (dp1->di_size != inodedep->id_savedsize) {
11437			dp1->di_size = inodedep->id_savedsize;
11438			hadchanges = 1;
11439		}
11440	} else {
11441		if (dp2->di_nlink != inodedep->id_savednlink) {
11442			dp2->di_nlink = inodedep->id_savednlink;
11443			hadchanges = 1;
11444		}
11445		if (dp2->di_size != inodedep->id_savedsize) {
11446			dp2->di_size = inodedep->id_savedsize;
11447			hadchanges = 1;
11448		}
11449		if (dp2->di_extsize != inodedep->id_savedextsize) {
11450			dp2->di_extsize = inodedep->id_savedextsize;
11451			hadchanges = 1;
11452		}
11453	}
11454	inodedep->id_savedsize = -1;
11455	inodedep->id_savedextsize = -1;
11456	inodedep->id_savednlink = -1;
11457	/*
11458	 * If there were any rollbacks in the inode block, then it must be
11459	 * marked dirty so that its will eventually get written back in
11460	 * its correct form.
11461	 */
11462	if (hadchanges)
11463		bdirty(bp);
11464bufwait:
11465	/*
11466	 * Process any allocdirects that completed during the update.
11467	 */
11468	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11469		handle_allocdirect_partdone(adp, &wkhd);
11470	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11471		handle_allocdirect_partdone(adp, &wkhd);
11472	/*
11473	 * Process deallocations that were held pending until the
11474	 * inode had been written to disk. Freeing of the inode
11475	 * is delayed until after all blocks have been freed to
11476	 * avoid creation of new <vfsid, inum, lbn> triples
11477	 * before the old ones have been deleted.  Completely
11478	 * unlinked inodes are not processed until the unlinked
11479	 * inode list is written or the last reference is removed.
11480	 */
11481	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11482		freefile = handle_bufwait(inodedep, NULL);
11483		if (freefile && !LIST_EMPTY(&wkhd)) {
11484			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11485			freefile = NULL;
11486		}
11487	}
11488	/*
11489	 * Move rolled forward dependency completions to the bufwait list
11490	 * now that those that were already written have been processed.
11491	 */
11492	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11493		panic("handle_written_inodeblock: bufwait but no changes");
11494	jwork_move(&inodedep->id_bufwait, &wkhd);
11495
11496	if (freefile != NULL) {
11497		/*
11498		 * If the inode is goingaway it was never written.  Fake up
11499		 * the state here so free_inodedep() can succeed.
11500		 */
11501		if (inodedep->id_state & GOINGAWAY)
11502			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11503		if (free_inodedep(inodedep) == 0)
11504			panic("handle_written_inodeblock: live inodedep %p",
11505			    inodedep);
11506		add_to_worklist(&freefile->fx_list, 0);
11507		return (0);
11508	}
11509
11510	/*
11511	 * If no outstanding dependencies, free it.
11512	 */
11513	if (free_inodedep(inodedep) ||
11514	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
11515	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
11516	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
11517	     LIST_FIRST(&inodedep->id_bufwait) == 0))
11518		return (0);
11519	return (hadchanges);
11520}
11521
11522static int
11523handle_written_indirdep(indirdep, bp, bpp)
11524	struct indirdep *indirdep;
11525	struct buf *bp;
11526	struct buf **bpp;
11527{
11528	struct allocindir *aip;
11529	struct buf *sbp;
11530	int chgs;
11531
11532	if (indirdep->ir_state & GOINGAWAY)
11533		panic("handle_written_indirdep: indirdep gone");
11534	if ((indirdep->ir_state & IOSTARTED) == 0)
11535		panic("handle_written_indirdep: IO not started");
11536	chgs = 0;
11537	/*
11538	 * If there were rollbacks revert them here.
11539	 */
11540	if (indirdep->ir_saveddata) {
11541		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11542		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11543			free(indirdep->ir_saveddata, M_INDIRDEP);
11544			indirdep->ir_saveddata = NULL;
11545		}
11546		chgs = 1;
11547	}
11548	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11549	indirdep->ir_state |= ATTACHED;
11550	/*
11551	 * Move allocindirs with written pointers to the completehd if
11552	 * the indirdep's pointer is not yet written.  Otherwise
11553	 * free them here.
11554	 */
11555	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) {
11556		LIST_REMOVE(aip, ai_next);
11557		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11558			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11559			    ai_next);
11560			newblk_freefrag(&aip->ai_block);
11561			continue;
11562		}
11563		free_newblk(&aip->ai_block);
11564	}
11565	/*
11566	 * Move allocindirs that have finished dependency processing from
11567	 * the done list to the write list after updating the pointers.
11568	 */
11569	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11570		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
11571			handle_allocindir_partdone(aip);
11572			if (aip == LIST_FIRST(&indirdep->ir_donehd))
11573				panic("disk_write_complete: not gone");
11574			chgs = 1;
11575		}
11576	}
11577	/*
11578	 * Preserve the indirdep if there were any changes or if it is not
11579	 * yet valid on disk.
11580	 */
11581	if (chgs) {
11582		stat_indir_blk_ptrs++;
11583		bdirty(bp);
11584		return (1);
11585	}
11586	/*
11587	 * If there were no changes we can discard the savedbp and detach
11588	 * ourselves from the buf.  We are only carrying completed pointers
11589	 * in this case.
11590	 */
11591	sbp = indirdep->ir_savebp;
11592	sbp->b_flags |= B_INVAL | B_NOCACHE;
11593	indirdep->ir_savebp = NULL;
11594	indirdep->ir_bp = NULL;
11595	if (*bpp != NULL)
11596		panic("handle_written_indirdep: bp already exists.");
11597	*bpp = sbp;
11598	/*
11599	 * The indirdep may not be freed until its parent points at it.
11600	 */
11601	if (indirdep->ir_state & DEPCOMPLETE)
11602		free_indirdep(indirdep);
11603
11604	return (0);
11605}
11606
11607/*
11608 * Process a diradd entry after its dependent inode has been written.
11609 * This routine must be called with splbio interrupts blocked.
11610 */
11611static void
11612diradd_inode_written(dap, inodedep)
11613	struct diradd *dap;
11614	struct inodedep *inodedep;
11615{
11616
11617	dap->da_state |= COMPLETE;
11618	complete_diradd(dap);
11619	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
11620}
11621
11622/*
11623 * Returns true if the bmsafemap will have rollbacks when written.  Must only
11624 * be called with the per-filesystem lock and the buf lock on the cg held.
11625 */
11626static int
11627bmsafemap_backgroundwrite(bmsafemap, bp)
11628	struct bmsafemap *bmsafemap;
11629	struct buf *bp;
11630{
11631	int dirty;
11632
11633	LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
11634	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
11635	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
11636	/*
11637	 * If we're initiating a background write we need to process the
11638	 * rollbacks as they exist now, not as they exist when IO starts.
11639	 * No other consumers will look at the contents of the shadowed
11640	 * buf so this is safe to do here.
11641	 */
11642	if (bp->b_xflags & BX_BKGRDMARKER)
11643		initiate_write_bmsafemap(bmsafemap, bp);
11644
11645	return (dirty);
11646}
11647
11648/*
11649 * Re-apply an allocation when a cg write is complete.
11650 */
11651static int
11652jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
11653	struct jnewblk *jnewblk;
11654	struct fs *fs;
11655	struct cg *cgp;
11656	uint8_t *blksfree;
11657{
11658	ufs1_daddr_t fragno;
11659	ufs2_daddr_t blkno;
11660	long cgbno, bbase;
11661	int frags, blk;
11662	int i;
11663
11664	frags = 0;
11665	cgbno = dtogd(fs, jnewblk->jn_blkno);
11666	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
11667		if (isclr(blksfree, cgbno + i))
11668			panic("jnewblk_rollforward: re-allocated fragment");
11669		frags++;
11670	}
11671	if (frags == fs->fs_frag) {
11672		blkno = fragstoblks(fs, cgbno);
11673		ffs_clrblock(fs, blksfree, (long)blkno);
11674		ffs_clusteracct(fs, cgp, blkno, -1);
11675		cgp->cg_cs.cs_nbfree--;
11676	} else {
11677		bbase = cgbno - fragnum(fs, cgbno);
11678		cgbno += jnewblk->jn_oldfrags;
11679                /* If a complete block had been reassembled, account for it. */
11680		fragno = fragstoblks(fs, bbase);
11681		if (ffs_isblock(fs, blksfree, fragno)) {
11682			cgp->cg_cs.cs_nffree += fs->fs_frag;
11683			ffs_clusteracct(fs, cgp, fragno, -1);
11684			cgp->cg_cs.cs_nbfree--;
11685		}
11686		/* Decrement the old frags.  */
11687		blk = blkmap(fs, blksfree, bbase);
11688		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11689		/* Allocate the fragment */
11690		for (i = 0; i < frags; i++)
11691			clrbit(blksfree, cgbno + i);
11692		cgp->cg_cs.cs_nffree -= frags;
11693		/* Add back in counts associated with the new frags */
11694		blk = blkmap(fs, blksfree, bbase);
11695		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11696	}
11697	return (frags);
11698}
11699
11700/*
11701 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
11702 * changes if it's not a background write.  Set all written dependencies
11703 * to DEPCOMPLETE and free the structure if possible.
11704 */
11705static int
11706handle_written_bmsafemap(bmsafemap, bp)
11707	struct bmsafemap *bmsafemap;
11708	struct buf *bp;
11709{
11710	struct newblk *newblk;
11711	struct inodedep *inodedep;
11712	struct jaddref *jaddref, *jatmp;
11713	struct jnewblk *jnewblk, *jntmp;
11714	struct ufsmount *ump;
11715	uint8_t *inosused;
11716	uint8_t *blksfree;
11717	struct cg *cgp;
11718	struct fs *fs;
11719	ino_t ino;
11720	int foreground;
11721	int chgs;
11722
11723	if ((bmsafemap->sm_state & IOSTARTED) == 0)
11724		panic("initiate_write_bmsafemap: Not started\n");
11725	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
11726	chgs = 0;
11727	bmsafemap->sm_state &= ~IOSTARTED;
11728	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
11729	/*
11730	 * Release journal work that was waiting on the write.
11731	 */
11732	handle_jwork(&bmsafemap->sm_freewr);
11733
11734	/*
11735	 * Restore unwritten inode allocation pending jaddref writes.
11736	 */
11737	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
11738		cgp = (struct cg *)bp->b_data;
11739		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11740		inosused = cg_inosused(cgp);
11741		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
11742		    ja_bmdeps, jatmp) {
11743			if ((jaddref->ja_state & UNDONE) == 0)
11744				continue;
11745			ino = jaddref->ja_ino % fs->fs_ipg;
11746			if (isset(inosused, ino))
11747				panic("handle_written_bmsafemap: "
11748				    "re-allocated inode");
11749			/* Do the roll-forward only if it's a real copy. */
11750			if (foreground) {
11751				if ((jaddref->ja_mode & IFMT) == IFDIR)
11752					cgp->cg_cs.cs_ndir++;
11753				cgp->cg_cs.cs_nifree--;
11754				setbit(inosused, ino);
11755				chgs = 1;
11756			}
11757			jaddref->ja_state &= ~UNDONE;
11758			jaddref->ja_state |= ATTACHED;
11759			free_jaddref(jaddref);
11760		}
11761	}
11762	/*
11763	 * Restore any block allocations which are pending journal writes.
11764	 */
11765	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11766		cgp = (struct cg *)bp->b_data;
11767		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11768		blksfree = cg_blksfree(cgp);
11769		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
11770		    jntmp) {
11771			if ((jnewblk->jn_state & UNDONE) == 0)
11772				continue;
11773			/* Do the roll-forward only if it's a real copy. */
11774			if (foreground &&
11775			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
11776				chgs = 1;
11777			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
11778			jnewblk->jn_state |= ATTACHED;
11779			free_jnewblk(jnewblk);
11780		}
11781	}
11782	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
11783		newblk->nb_state |= DEPCOMPLETE;
11784		newblk->nb_state &= ~ONDEPLIST;
11785		newblk->nb_bmsafemap = NULL;
11786		LIST_REMOVE(newblk, nb_deps);
11787		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
11788			handle_allocdirect_partdone(
11789			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
11790		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
11791			handle_allocindir_partdone(
11792			    WK_ALLOCINDIR(&newblk->nb_list));
11793		else if (newblk->nb_list.wk_type != D_NEWBLK)
11794			panic("handle_written_bmsafemap: Unexpected type: %s",
11795			    TYPENAME(newblk->nb_list.wk_type));
11796	}
11797	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
11798		inodedep->id_state |= DEPCOMPLETE;
11799		inodedep->id_state &= ~ONDEPLIST;
11800		LIST_REMOVE(inodedep, id_deps);
11801		inodedep->id_bmsafemap = NULL;
11802	}
11803	LIST_REMOVE(bmsafemap, sm_next);
11804	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
11805	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
11806	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
11807	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
11808	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
11809		LIST_REMOVE(bmsafemap, sm_hash);
11810		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
11811		return (0);
11812	}
11813	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
11814	if (foreground)
11815		bdirty(bp);
11816	return (1);
11817}
11818
11819/*
11820 * Try to free a mkdir dependency.
11821 */
11822static void
11823complete_mkdir(mkdir)
11824	struct mkdir *mkdir;
11825{
11826	struct diradd *dap;
11827
11828	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
11829		return;
11830	LIST_REMOVE(mkdir, md_mkdirs);
11831	dap = mkdir->md_diradd;
11832	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
11833	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
11834		dap->da_state |= DEPCOMPLETE;
11835		complete_diradd(dap);
11836	}
11837	WORKITEM_FREE(mkdir, D_MKDIR);
11838}
11839
11840/*
11841 * Handle the completion of a mkdir dependency.
11842 */
11843static void
11844handle_written_mkdir(mkdir, type)
11845	struct mkdir *mkdir;
11846	int type;
11847{
11848
11849	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
11850		panic("handle_written_mkdir: bad type");
11851	mkdir->md_state |= COMPLETE;
11852	complete_mkdir(mkdir);
11853}
11854
11855static int
11856free_pagedep(pagedep)
11857	struct pagedep *pagedep;
11858{
11859	int i;
11860
11861	if (pagedep->pd_state & NEWBLOCK)
11862		return (0);
11863	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
11864		return (0);
11865	for (i = 0; i < DAHASHSZ; i++)
11866		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
11867			return (0);
11868	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
11869		return (0);
11870	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
11871		return (0);
11872	if (pagedep->pd_state & ONWORKLIST)
11873		WORKLIST_REMOVE(&pagedep->pd_list);
11874	LIST_REMOVE(pagedep, pd_hash);
11875	WORKITEM_FREE(pagedep, D_PAGEDEP);
11876
11877	return (1);
11878}
11879
11880/*
11881 * Called from within softdep_disk_write_complete above.
11882 * A write operation was just completed. Removed inodes can
11883 * now be freed and associated block pointers may be committed.
11884 * Note that this routine is always called from interrupt level
11885 * with further splbio interrupts blocked.
11886 */
11887static int
11888handle_written_filepage(pagedep, bp)
11889	struct pagedep *pagedep;
11890	struct buf *bp;		/* buffer containing the written page */
11891{
11892	struct dirrem *dirrem;
11893	struct diradd *dap, *nextdap;
11894	struct direct *ep;
11895	int i, chgs;
11896
11897	if ((pagedep->pd_state & IOSTARTED) == 0)
11898		panic("handle_written_filepage: not started");
11899	pagedep->pd_state &= ~IOSTARTED;
11900	/*
11901	 * Process any directory removals that have been committed.
11902	 */
11903	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
11904		LIST_REMOVE(dirrem, dm_next);
11905		dirrem->dm_state |= COMPLETE;
11906		dirrem->dm_dirinum = pagedep->pd_ino;
11907		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
11908		    ("handle_written_filepage: Journal entries not written."));
11909		add_to_worklist(&dirrem->dm_list, 0);
11910	}
11911	/*
11912	 * Free any directory additions that have been committed.
11913	 * If it is a newly allocated block, we have to wait until
11914	 * the on-disk directory inode claims the new block.
11915	 */
11916	if ((pagedep->pd_state & NEWBLOCK) == 0)
11917		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
11918			free_diradd(dap, NULL);
11919	/*
11920	 * Uncommitted directory entries must be restored.
11921	 */
11922	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
11923		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
11924		     dap = nextdap) {
11925			nextdap = LIST_NEXT(dap, da_pdlist);
11926			if (dap->da_state & ATTACHED)
11927				panic("handle_written_filepage: attached");
11928			ep = (struct direct *)
11929			    ((char *)bp->b_data + dap->da_offset);
11930			ep->d_ino = dap->da_newinum;
11931			dap->da_state &= ~UNDONE;
11932			dap->da_state |= ATTACHED;
11933			chgs = 1;
11934			/*
11935			 * If the inode referenced by the directory has
11936			 * been written out, then the dependency can be
11937			 * moved to the pending list.
11938			 */
11939			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
11940				LIST_REMOVE(dap, da_pdlist);
11941				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
11942				    da_pdlist);
11943			}
11944		}
11945	}
11946	/*
11947	 * If there were any rollbacks in the directory, then it must be
11948	 * marked dirty so that its will eventually get written back in
11949	 * its correct form.
11950	 */
11951	if (chgs) {
11952		if ((bp->b_flags & B_DELWRI) == 0)
11953			stat_dir_entry++;
11954		bdirty(bp);
11955		return (1);
11956	}
11957	/*
11958	 * If we are not waiting for a new directory block to be
11959	 * claimed by its inode, then the pagedep will be freed.
11960	 * Otherwise it will remain to track any new entries on
11961	 * the page in case they are fsync'ed.
11962	 */
11963	free_pagedep(pagedep);
11964	return (0);
11965}
11966
11967/*
11968 * Writing back in-core inode structures.
11969 *
11970 * The filesystem only accesses an inode's contents when it occupies an
11971 * "in-core" inode structure.  These "in-core" structures are separate from
11972 * the page frames used to cache inode blocks.  Only the latter are
11973 * transferred to/from the disk.  So, when the updated contents of the
11974 * "in-core" inode structure are copied to the corresponding in-memory inode
11975 * block, the dependencies are also transferred.  The following procedure is
11976 * called when copying a dirty "in-core" inode to a cached inode block.
11977 */
11978
11979/*
11980 * Called when an inode is loaded from disk. If the effective link count
11981 * differed from the actual link count when it was last flushed, then we
11982 * need to ensure that the correct effective link count is put back.
11983 */
11984void
11985softdep_load_inodeblock(ip)
11986	struct inode *ip;	/* the "in_core" copy of the inode */
11987{
11988	struct inodedep *inodedep;
11989
11990	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
11991	    ("softdep_load_inodeblock called on non-softdep filesystem"));
11992	/*
11993	 * Check for alternate nlink count.
11994	 */
11995	ip->i_effnlink = ip->i_nlink;
11996	ACQUIRE_LOCK(ip->i_ump);
11997	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
11998	    &inodedep) == 0) {
11999		FREE_LOCK(ip->i_ump);
12000		return;
12001	}
12002	ip->i_effnlink -= inodedep->id_nlinkdelta;
12003	FREE_LOCK(ip->i_ump);
12004}
12005
12006/*
12007 * This routine is called just before the "in-core" inode
12008 * information is to be copied to the in-memory inode block.
12009 * Recall that an inode block contains several inodes. If
12010 * the force flag is set, then the dependencies will be
12011 * cleared so that the update can always be made. Note that
12012 * the buffer is locked when this routine is called, so we
12013 * will never be in the middle of writing the inode block
12014 * to disk.
12015 */
12016void
12017softdep_update_inodeblock(ip, bp, waitfor)
12018	struct inode *ip;	/* the "in_core" copy of the inode */
12019	struct buf *bp;		/* the buffer containing the inode block */
12020	int waitfor;		/* nonzero => update must be allowed */
12021{
12022	struct inodedep *inodedep;
12023	struct inoref *inoref;
12024	struct ufsmount *ump;
12025	struct worklist *wk;
12026	struct mount *mp;
12027	struct buf *ibp;
12028	struct fs *fs;
12029	int error;
12030
12031	ump = ip->i_ump;
12032	mp = UFSTOVFS(ump);
12033	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
12034	    ("softdep_update_inodeblock called on non-softdep filesystem"));
12035	fs = ip->i_fs;
12036	/*
12037	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
12038	 * does not have access to the in-core ip so must write directly into
12039	 * the inode block buffer when setting freelink.
12040	 */
12041	if (fs->fs_magic == FS_UFS1_MAGIC)
12042		DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
12043		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12044	else
12045		DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
12046		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12047	/*
12048	 * If the effective link count is not equal to the actual link
12049	 * count, then we must track the difference in an inodedep while
12050	 * the inode is (potentially) tossed out of the cache. Otherwise,
12051	 * if there is no existing inodedep, then there are no dependencies
12052	 * to track.
12053	 */
12054	ACQUIRE_LOCK(ump);
12055again:
12056	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12057		FREE_LOCK(ump);
12058		if (ip->i_effnlink != ip->i_nlink)
12059			panic("softdep_update_inodeblock: bad link count");
12060		return;
12061	}
12062	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
12063		panic("softdep_update_inodeblock: bad delta");
12064	/*
12065	 * If we're flushing all dependencies we must also move any waiting
12066	 * for journal writes onto the bufwait list prior to I/O.
12067	 */
12068	if (waitfor) {
12069		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12070			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12071			    == DEPCOMPLETE) {
12072				jwait(&inoref->if_list, MNT_WAIT);
12073				goto again;
12074			}
12075		}
12076	}
12077	/*
12078	 * Changes have been initiated. Anything depending on these
12079	 * changes cannot occur until this inode has been written.
12080	 */
12081	inodedep->id_state &= ~COMPLETE;
12082	if ((inodedep->id_state & ONWORKLIST) == 0)
12083		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
12084	/*
12085	 * Any new dependencies associated with the incore inode must
12086	 * now be moved to the list associated with the buffer holding
12087	 * the in-memory copy of the inode. Once merged process any
12088	 * allocdirects that are completed by the merger.
12089	 */
12090	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
12091	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
12092		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
12093		    NULL);
12094	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
12095	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
12096		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
12097		    NULL);
12098	/*
12099	 * Now that the inode has been pushed into the buffer, the
12100	 * operations dependent on the inode being written to disk
12101	 * can be moved to the id_bufwait so that they will be
12102	 * processed when the buffer I/O completes.
12103	 */
12104	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
12105		WORKLIST_REMOVE(wk);
12106		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
12107	}
12108	/*
12109	 * Newly allocated inodes cannot be written until the bitmap
12110	 * that allocates them have been written (indicated by
12111	 * DEPCOMPLETE being set in id_state). If we are doing a
12112	 * forced sync (e.g., an fsync on a file), we force the bitmap
12113	 * to be written so that the update can be done.
12114	 */
12115	if (waitfor == 0) {
12116		FREE_LOCK(ump);
12117		return;
12118	}
12119retry:
12120	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
12121		FREE_LOCK(ump);
12122		return;
12123	}
12124	ibp = inodedep->id_bmsafemap->sm_buf;
12125	ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
12126	if (ibp == NULL) {
12127		/*
12128		 * If ibp came back as NULL, the dependency could have been
12129		 * freed while we slept.  Look it up again, and check to see
12130		 * that it has completed.
12131		 */
12132		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
12133			goto retry;
12134		FREE_LOCK(ump);
12135		return;
12136	}
12137	FREE_LOCK(ump);
12138	if ((error = bwrite(ibp)) != 0)
12139		softdep_error("softdep_update_inodeblock: bwrite", error);
12140}
12141
12142/*
12143 * Merge the a new inode dependency list (such as id_newinoupdt) into an
12144 * old inode dependency list (such as id_inoupdt). This routine must be
12145 * called with splbio interrupts blocked.
12146 */
12147static void
12148merge_inode_lists(newlisthead, oldlisthead)
12149	struct allocdirectlst *newlisthead;
12150	struct allocdirectlst *oldlisthead;
12151{
12152	struct allocdirect *listadp, *newadp;
12153
12154	newadp = TAILQ_FIRST(newlisthead);
12155	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
12156		if (listadp->ad_offset < newadp->ad_offset) {
12157			listadp = TAILQ_NEXT(listadp, ad_next);
12158			continue;
12159		}
12160		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12161		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
12162		if (listadp->ad_offset == newadp->ad_offset) {
12163			allocdirect_merge(oldlisthead, newadp,
12164			    listadp);
12165			listadp = newadp;
12166		}
12167		newadp = TAILQ_FIRST(newlisthead);
12168	}
12169	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
12170		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12171		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
12172	}
12173}
12174
12175/*
12176 * If we are doing an fsync, then we must ensure that any directory
12177 * entries for the inode have been written after the inode gets to disk.
12178 */
12179int
12180softdep_fsync(vp)
12181	struct vnode *vp;	/* the "in_core" copy of the inode */
12182{
12183	struct inodedep *inodedep;
12184	struct pagedep *pagedep;
12185	struct inoref *inoref;
12186	struct ufsmount *ump;
12187	struct worklist *wk;
12188	struct diradd *dap;
12189	struct mount *mp;
12190	struct vnode *pvp;
12191	struct inode *ip;
12192	struct buf *bp;
12193	struct fs *fs;
12194	struct thread *td = curthread;
12195	int error, flushparent, pagedep_new_block;
12196	ino_t parentino;
12197	ufs_lbn_t lbn;
12198
12199	ip = VTOI(vp);
12200	fs = ip->i_fs;
12201	ump = ip->i_ump;
12202	mp = vp->v_mount;
12203	if (MOUNTEDSOFTDEP(mp) == 0)
12204		return (0);
12205	ACQUIRE_LOCK(ump);
12206restart:
12207	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12208		FREE_LOCK(ump);
12209		return (0);
12210	}
12211	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12212		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12213		    == DEPCOMPLETE) {
12214			jwait(&inoref->if_list, MNT_WAIT);
12215			goto restart;
12216		}
12217	}
12218	if (!LIST_EMPTY(&inodedep->id_inowait) ||
12219	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12220	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12221	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12222	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12223		panic("softdep_fsync: pending ops %p", inodedep);
12224	for (error = 0, flushparent = 0; ; ) {
12225		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12226			break;
12227		if (wk->wk_type != D_DIRADD)
12228			panic("softdep_fsync: Unexpected type %s",
12229			    TYPENAME(wk->wk_type));
12230		dap = WK_DIRADD(wk);
12231		/*
12232		 * Flush our parent if this directory entry has a MKDIR_PARENT
12233		 * dependency or is contained in a newly allocated block.
12234		 */
12235		if (dap->da_state & DIRCHG)
12236			pagedep = dap->da_previous->dm_pagedep;
12237		else
12238			pagedep = dap->da_pagedep;
12239		parentino = pagedep->pd_ino;
12240		lbn = pagedep->pd_lbn;
12241		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12242			panic("softdep_fsync: dirty");
12243		if ((dap->da_state & MKDIR_PARENT) ||
12244		    (pagedep->pd_state & NEWBLOCK))
12245			flushparent = 1;
12246		else
12247			flushparent = 0;
12248		/*
12249		 * If we are being fsync'ed as part of vgone'ing this vnode,
12250		 * then we will not be able to release and recover the
12251		 * vnode below, so we just have to give up on writing its
12252		 * directory entry out. It will eventually be written, just
12253		 * not now, but then the user was not asking to have it
12254		 * written, so we are not breaking any promises.
12255		 */
12256		if (vp->v_iflag & VI_DOOMED)
12257			break;
12258		/*
12259		 * We prevent deadlock by always fetching inodes from the
12260		 * root, moving down the directory tree. Thus, when fetching
12261		 * our parent directory, we first try to get the lock. If
12262		 * that fails, we must unlock ourselves before requesting
12263		 * the lock on our parent. See the comment in ufs_lookup
12264		 * for details on possible races.
12265		 */
12266		FREE_LOCK(ump);
12267		if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
12268		    FFSV_FORCEINSMQ)) {
12269			error = vfs_busy(mp, MBF_NOWAIT);
12270			if (error != 0) {
12271				vfs_ref(mp);
12272				VOP_UNLOCK(vp, 0);
12273				error = vfs_busy(mp, 0);
12274				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12275				vfs_rel(mp);
12276				if (error != 0)
12277					return (ENOENT);
12278				if (vp->v_iflag & VI_DOOMED) {
12279					vfs_unbusy(mp);
12280					return (ENOENT);
12281				}
12282			}
12283			VOP_UNLOCK(vp, 0);
12284			error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
12285			    &pvp, FFSV_FORCEINSMQ);
12286			vfs_unbusy(mp);
12287			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12288			if (vp->v_iflag & VI_DOOMED) {
12289				if (error == 0)
12290					vput(pvp);
12291				error = ENOENT;
12292			}
12293			if (error != 0)
12294				return (error);
12295		}
12296		/*
12297		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12298		 * that are contained in direct blocks will be resolved by
12299		 * doing a ffs_update. Pagedeps contained in indirect blocks
12300		 * may require a complete sync'ing of the directory. So, we
12301		 * try the cheap and fast ffs_update first, and if that fails,
12302		 * then we do the slower ffs_syncvnode of the directory.
12303		 */
12304		if (flushparent) {
12305			int locked;
12306
12307			if ((error = ffs_update(pvp, 1)) != 0) {
12308				vput(pvp);
12309				return (error);
12310			}
12311			ACQUIRE_LOCK(ump);
12312			locked = 1;
12313			if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12314				if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12315					if (wk->wk_type != D_DIRADD)
12316						panic("softdep_fsync: Unexpected type %s",
12317						      TYPENAME(wk->wk_type));
12318					dap = WK_DIRADD(wk);
12319					if (dap->da_state & DIRCHG)
12320						pagedep = dap->da_previous->dm_pagedep;
12321					else
12322						pagedep = dap->da_pagedep;
12323					pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12324					FREE_LOCK(ump);
12325					locked = 0;
12326					if (pagedep_new_block && (error =
12327					    ffs_syncvnode(pvp, MNT_WAIT, 0))) {
12328						vput(pvp);
12329						return (error);
12330					}
12331				}
12332			}
12333			if (locked)
12334				FREE_LOCK(ump);
12335		}
12336		/*
12337		 * Flush directory page containing the inode's name.
12338		 */
12339		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12340		    &bp);
12341		if (error == 0)
12342			error = bwrite(bp);
12343		else
12344			brelse(bp);
12345		vput(pvp);
12346		if (error != 0)
12347			return (error);
12348		ACQUIRE_LOCK(ump);
12349		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12350			break;
12351	}
12352	FREE_LOCK(ump);
12353	return (0);
12354}
12355
12356/*
12357 * Flush all the dirty bitmaps associated with the block device
12358 * before flushing the rest of the dirty blocks so as to reduce
12359 * the number of dependencies that will have to be rolled back.
12360 *
12361 * XXX Unused?
12362 */
12363void
12364softdep_fsync_mountdev(vp)
12365	struct vnode *vp;
12366{
12367	struct buf *bp, *nbp;
12368	struct worklist *wk;
12369	struct bufobj *bo;
12370
12371	if (!vn_isdisk(vp, NULL))
12372		panic("softdep_fsync_mountdev: vnode not a disk");
12373	bo = &vp->v_bufobj;
12374restart:
12375	BO_LOCK(bo);
12376	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12377		/*
12378		 * If it is already scheduled, skip to the next buffer.
12379		 */
12380		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12381			continue;
12382
12383		if ((bp->b_flags & B_DELWRI) == 0)
12384			panic("softdep_fsync_mountdev: not dirty");
12385		/*
12386		 * We are only interested in bitmaps with outstanding
12387		 * dependencies.
12388		 */
12389		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12390		    wk->wk_type != D_BMSAFEMAP ||
12391		    (bp->b_vflags & BV_BKGRDINPROG)) {
12392			BUF_UNLOCK(bp);
12393			continue;
12394		}
12395		BO_UNLOCK(bo);
12396		bremfree(bp);
12397		(void) bawrite(bp);
12398		goto restart;
12399	}
12400	drain_output(vp);
12401	BO_UNLOCK(bo);
12402}
12403
12404/*
12405 * Sync all cylinder groups that were dirty at the time this function is
12406 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
12407 * is used to flush freedep activity that may be holding up writes to a
12408 * indirect block.
12409 */
12410static int
12411sync_cgs(mp, waitfor)
12412	struct mount *mp;
12413	int waitfor;
12414{
12415	struct bmsafemap *bmsafemap;
12416	struct bmsafemap *sentinel;
12417	struct ufsmount *ump;
12418	struct buf *bp;
12419	int error;
12420
12421	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12422	sentinel->sm_cg = -1;
12423	ump = VFSTOUFS(mp);
12424	error = 0;
12425	ACQUIRE_LOCK(ump);
12426	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12427	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12428	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12429		/* Skip sentinels and cgs with no work to release. */
12430		if (bmsafemap->sm_cg == -1 ||
12431		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12432		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
12433			LIST_REMOVE(sentinel, sm_next);
12434			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12435			continue;
12436		}
12437		/*
12438		 * If we don't get the lock and we're waiting try again, if
12439		 * not move on to the next buf and try to sync it.
12440		 */
12441		bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
12442		if (bp == NULL && waitfor == MNT_WAIT)
12443			continue;
12444		LIST_REMOVE(sentinel, sm_next);
12445		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12446		if (bp == NULL)
12447			continue;
12448		FREE_LOCK(ump);
12449		if (waitfor == MNT_NOWAIT)
12450			bawrite(bp);
12451		else
12452			error = bwrite(bp);
12453		ACQUIRE_LOCK(ump);
12454		if (error)
12455			break;
12456	}
12457	LIST_REMOVE(sentinel, sm_next);
12458	FREE_LOCK(ump);
12459	free(sentinel, M_BMSAFEMAP);
12460	return (error);
12461}
12462
12463/*
12464 * This routine is called when we are trying to synchronously flush a
12465 * file. This routine must eliminate any filesystem metadata dependencies
12466 * so that the syncing routine can succeed.
12467 */
12468int
12469softdep_sync_metadata(struct vnode *vp)
12470{
12471	struct inode *ip;
12472	int error;
12473
12474	ip = VTOI(vp);
12475	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
12476	    ("softdep_sync_metadata called on non-softdep filesystem"));
12477	/*
12478	 * Ensure that any direct block dependencies have been cleared,
12479	 * truncations are started, and inode references are journaled.
12480	 */
12481	ACQUIRE_LOCK(ip->i_ump);
12482	/*
12483	 * Write all journal records to prevent rollbacks on devvp.
12484	 */
12485	if (vp->v_type == VCHR)
12486		softdep_flushjournal(vp->v_mount);
12487	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
12488	/*
12489	 * Ensure that all truncates are written so we won't find deps on
12490	 * indirect blocks.
12491	 */
12492	process_truncates(vp);
12493	FREE_LOCK(ip->i_ump);
12494
12495	return (error);
12496}
12497
12498/*
12499 * This routine is called when we are attempting to sync a buf with
12500 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
12501 * other IO it can but returns EBUSY if the buffer is not yet able to
12502 * be written.  Dependencies which will not cause rollbacks will always
12503 * return 0.
12504 */
12505int
12506softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
12507{
12508	struct indirdep *indirdep;
12509	struct pagedep *pagedep;
12510	struct allocindir *aip;
12511	struct newblk *newblk;
12512	struct ufsmount *ump;
12513	struct buf *nbp;
12514	struct worklist *wk;
12515	int i, error;
12516
12517	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12518	    ("softdep_sync_buf called on non-softdep filesystem"));
12519	/*
12520	 * For VCHR we just don't want to force flush any dependencies that
12521	 * will cause rollbacks.
12522	 */
12523	if (vp->v_type == VCHR) {
12524		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12525			return (EBUSY);
12526		return (0);
12527	}
12528	ump = VTOI(vp)->i_ump;
12529	ACQUIRE_LOCK(ump);
12530	/*
12531	 * As we hold the buffer locked, none of its dependencies
12532	 * will disappear.
12533	 */
12534	error = 0;
12535top:
12536	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12537		switch (wk->wk_type) {
12538
12539		case D_ALLOCDIRECT:
12540		case D_ALLOCINDIR:
12541			newblk = WK_NEWBLK(wk);
12542			if (newblk->nb_jnewblk != NULL) {
12543				if (waitfor == MNT_NOWAIT) {
12544					error = EBUSY;
12545					goto out_unlock;
12546				}
12547				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12548				goto top;
12549			}
12550			if (newblk->nb_state & DEPCOMPLETE ||
12551			    waitfor == MNT_NOWAIT)
12552				continue;
12553			nbp = newblk->nb_bmsafemap->sm_buf;
12554			nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12555			if (nbp == NULL)
12556				goto top;
12557			FREE_LOCK(ump);
12558			if ((error = bwrite(nbp)) != 0)
12559				goto out;
12560			ACQUIRE_LOCK(ump);
12561			continue;
12562
12563		case D_INDIRDEP:
12564			indirdep = WK_INDIRDEP(wk);
12565			if (waitfor == MNT_NOWAIT) {
12566				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
12567				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
12568					error = EBUSY;
12569					goto out_unlock;
12570				}
12571			}
12572			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
12573				panic("softdep_sync_buf: truncation pending.");
12574		restart:
12575			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
12576				newblk = (struct newblk *)aip;
12577				if (newblk->nb_jnewblk != NULL) {
12578					jwait(&newblk->nb_jnewblk->jn_list,
12579					    waitfor);
12580					goto restart;
12581				}
12582				if (newblk->nb_state & DEPCOMPLETE)
12583					continue;
12584				nbp = newblk->nb_bmsafemap->sm_buf;
12585				nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12586				if (nbp == NULL)
12587					goto restart;
12588				FREE_LOCK(ump);
12589				if ((error = bwrite(nbp)) != 0)
12590					goto out;
12591				ACQUIRE_LOCK(ump);
12592				goto restart;
12593			}
12594			continue;
12595
12596		case D_PAGEDEP:
12597			/*
12598			 * Only flush directory entries in synchronous passes.
12599			 */
12600			if (waitfor != MNT_WAIT) {
12601				error = EBUSY;
12602				goto out_unlock;
12603			}
12604			/*
12605			 * While syncing snapshots, we must allow recursive
12606			 * lookups.
12607			 */
12608			BUF_AREC(bp);
12609			/*
12610			 * We are trying to sync a directory that may
12611			 * have dependencies on both its own metadata
12612			 * and/or dependencies on the inodes of any
12613			 * recently allocated files. We walk its diradd
12614			 * lists pushing out the associated inode.
12615			 */
12616			pagedep = WK_PAGEDEP(wk);
12617			for (i = 0; i < DAHASHSZ; i++) {
12618				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
12619					continue;
12620				if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12621				    &pagedep->pd_diraddhd[i]))) {
12622					BUF_NOREC(bp);
12623					goto out_unlock;
12624				}
12625			}
12626			BUF_NOREC(bp);
12627			continue;
12628
12629		case D_FREEWORK:
12630		case D_FREEDEP:
12631		case D_JSEGDEP:
12632		case D_JNEWBLK:
12633			continue;
12634
12635		default:
12636			panic("softdep_sync_buf: Unknown type %s",
12637			    TYPENAME(wk->wk_type));
12638			/* NOTREACHED */
12639		}
12640	}
12641out_unlock:
12642	FREE_LOCK(ump);
12643out:
12644	return (error);
12645}
12646
12647/*
12648 * Flush the dependencies associated with an inodedep.
12649 * Called with splbio blocked.
12650 */
12651static int
12652flush_inodedep_deps(vp, mp, ino)
12653	struct vnode *vp;
12654	struct mount *mp;
12655	ino_t ino;
12656{
12657	struct inodedep *inodedep;
12658	struct inoref *inoref;
12659	struct ufsmount *ump;
12660	int error, waitfor;
12661
12662	/*
12663	 * This work is done in two passes. The first pass grabs most
12664	 * of the buffers and begins asynchronously writing them. The
12665	 * only way to wait for these asynchronous writes is to sleep
12666	 * on the filesystem vnode which may stay busy for a long time
12667	 * if the filesystem is active. So, instead, we make a second
12668	 * pass over the dependencies blocking on each write. In the
12669	 * usual case we will be blocking against a write that we
12670	 * initiated, so when it is done the dependency will have been
12671	 * resolved. Thus the second pass is expected to end quickly.
12672	 * We give a brief window at the top of the loop to allow
12673	 * any pending I/O to complete.
12674	 */
12675	ump = VFSTOUFS(mp);
12676	LOCK_OWNED(ump);
12677	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12678		if (error)
12679			return (error);
12680		FREE_LOCK(ump);
12681		ACQUIRE_LOCK(ump);
12682restart:
12683		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12684			return (0);
12685		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12686			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12687			    == DEPCOMPLETE) {
12688				jwait(&inoref->if_list, MNT_WAIT);
12689				goto restart;
12690			}
12691		}
12692		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12693		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12694		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12695		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12696			continue;
12697		/*
12698		 * If pass2, we are done, otherwise do pass 2.
12699		 */
12700		if (waitfor == MNT_WAIT)
12701			break;
12702		waitfor = MNT_WAIT;
12703	}
12704	/*
12705	 * Try freeing inodedep in case all dependencies have been removed.
12706	 */
12707	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12708		(void) free_inodedep(inodedep);
12709	return (0);
12710}
12711
12712/*
12713 * Flush an inode dependency list.
12714 * Called with splbio blocked.
12715 */
12716static int
12717flush_deplist(listhead, waitfor, errorp)
12718	struct allocdirectlst *listhead;
12719	int waitfor;
12720	int *errorp;
12721{
12722	struct allocdirect *adp;
12723	struct newblk *newblk;
12724	struct ufsmount *ump;
12725	struct buf *bp;
12726
12727	if ((adp = TAILQ_FIRST(listhead)) == NULL)
12728		return (0);
12729	ump = VFSTOUFS(adp->ad_list.wk_mp);
12730	LOCK_OWNED(ump);
12731	TAILQ_FOREACH(adp, listhead, ad_next) {
12732		newblk = (struct newblk *)adp;
12733		if (newblk->nb_jnewblk != NULL) {
12734			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12735			return (1);
12736		}
12737		if (newblk->nb_state & DEPCOMPLETE)
12738			continue;
12739		bp = newblk->nb_bmsafemap->sm_buf;
12740		bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor);
12741		if (bp == NULL) {
12742			if (waitfor == MNT_NOWAIT)
12743				continue;
12744			return (1);
12745		}
12746		FREE_LOCK(ump);
12747		if (waitfor == MNT_NOWAIT)
12748			bawrite(bp);
12749		else
12750			*errorp = bwrite(bp);
12751		ACQUIRE_LOCK(ump);
12752		return (1);
12753	}
12754	return (0);
12755}
12756
12757/*
12758 * Flush dependencies associated with an allocdirect block.
12759 */
12760static int
12761flush_newblk_dep(vp, mp, lbn)
12762	struct vnode *vp;
12763	struct mount *mp;
12764	ufs_lbn_t lbn;
12765{
12766	struct newblk *newblk;
12767	struct ufsmount *ump;
12768	struct bufobj *bo;
12769	struct inode *ip;
12770	struct buf *bp;
12771	ufs2_daddr_t blkno;
12772	int error;
12773
12774	error = 0;
12775	bo = &vp->v_bufobj;
12776	ip = VTOI(vp);
12777	blkno = DIP(ip, i_db[lbn]);
12778	if (blkno == 0)
12779		panic("flush_newblk_dep: Missing block");
12780	ump = VFSTOUFS(mp);
12781	ACQUIRE_LOCK(ump);
12782	/*
12783	 * Loop until all dependencies related to this block are satisfied.
12784	 * We must be careful to restart after each sleep in case a write
12785	 * completes some part of this process for us.
12786	 */
12787	for (;;) {
12788		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
12789			FREE_LOCK(ump);
12790			break;
12791		}
12792		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
12793			panic("flush_newblk_deps: Bad newblk %p", newblk);
12794		/*
12795		 * Flush the journal.
12796		 */
12797		if (newblk->nb_jnewblk != NULL) {
12798			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12799			continue;
12800		}
12801		/*
12802		 * Write the bitmap dependency.
12803		 */
12804		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
12805			bp = newblk->nb_bmsafemap->sm_buf;
12806			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12807			if (bp == NULL)
12808				continue;
12809			FREE_LOCK(ump);
12810			error = bwrite(bp);
12811			if (error)
12812				break;
12813			ACQUIRE_LOCK(ump);
12814			continue;
12815		}
12816		/*
12817		 * Write the buffer.
12818		 */
12819		FREE_LOCK(ump);
12820		BO_LOCK(bo);
12821		bp = gbincore(bo, lbn);
12822		if (bp != NULL) {
12823			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
12824			    LK_INTERLOCK, BO_LOCKPTR(bo));
12825			if (error == ENOLCK) {
12826				ACQUIRE_LOCK(ump);
12827				continue; /* Slept, retry */
12828			}
12829			if (error != 0)
12830				break;	/* Failed */
12831			if (bp->b_flags & B_DELWRI) {
12832				bremfree(bp);
12833				error = bwrite(bp);
12834				if (error)
12835					break;
12836			} else
12837				BUF_UNLOCK(bp);
12838		} else
12839			BO_UNLOCK(bo);
12840		/*
12841		 * We have to wait for the direct pointers to
12842		 * point at the newdirblk before the dependency
12843		 * will go away.
12844		 */
12845		error = ffs_update(vp, 1);
12846		if (error)
12847			break;
12848		ACQUIRE_LOCK(ump);
12849	}
12850	return (error);
12851}
12852
12853/*
12854 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
12855 * Called with splbio blocked.
12856 */
12857static int
12858flush_pagedep_deps(pvp, mp, diraddhdp)
12859	struct vnode *pvp;
12860	struct mount *mp;
12861	struct diraddhd *diraddhdp;
12862{
12863	struct inodedep *inodedep;
12864	struct inoref *inoref;
12865	struct ufsmount *ump;
12866	struct diradd *dap;
12867	struct vnode *vp;
12868	int error = 0;
12869	struct buf *bp;
12870	ino_t inum;
12871	struct diraddhd unfinished;
12872
12873	LIST_INIT(&unfinished);
12874	ump = VFSTOUFS(mp);
12875	LOCK_OWNED(ump);
12876restart:
12877	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12878		/*
12879		 * Flush ourselves if this directory entry
12880		 * has a MKDIR_PARENT dependency.
12881		 */
12882		if (dap->da_state & MKDIR_PARENT) {
12883			FREE_LOCK(ump);
12884			if ((error = ffs_update(pvp, 1)) != 0)
12885				break;
12886			ACQUIRE_LOCK(ump);
12887			/*
12888			 * If that cleared dependencies, go on to next.
12889			 */
12890			if (dap != LIST_FIRST(diraddhdp))
12891				continue;
12892			/*
12893			 * All MKDIR_PARENT dependencies and all the
12894			 * NEWBLOCK pagedeps that are contained in direct
12895			 * blocks were resolved by doing above ffs_update.
12896			 * Pagedeps contained in indirect blocks may
12897			 * require a complete sync'ing of the directory.
12898			 * We are in the midst of doing a complete sync,
12899			 * so if they are not resolved in this pass we
12900			 * defer them for now as they will be sync'ed by
12901			 * our caller shortly.
12902			 */
12903			LIST_REMOVE(dap, da_pdlist);
12904			LIST_INSERT_HEAD(&unfinished, dap, da_pdlist);
12905			continue;
12906		}
12907		/*
12908		 * A newly allocated directory must have its "." and
12909		 * ".." entries written out before its name can be
12910		 * committed in its parent.
12911		 */
12912		inum = dap->da_newinum;
12913		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12914			panic("flush_pagedep_deps: lost inode1");
12915		/*
12916		 * Wait for any pending journal adds to complete so we don't
12917		 * cause rollbacks while syncing.
12918		 */
12919		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12920			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12921			    == DEPCOMPLETE) {
12922				jwait(&inoref->if_list, MNT_WAIT);
12923				goto restart;
12924			}
12925		}
12926		if (dap->da_state & MKDIR_BODY) {
12927			FREE_LOCK(ump);
12928			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12929			    FFSV_FORCEINSMQ)))
12930				break;
12931			error = flush_newblk_dep(vp, mp, 0);
12932			/*
12933			 * If we still have the dependency we might need to
12934			 * update the vnode to sync the new link count to
12935			 * disk.
12936			 */
12937			if (error == 0 && dap == LIST_FIRST(diraddhdp))
12938				error = ffs_update(vp, 1);
12939			vput(vp);
12940			if (error != 0)
12941				break;
12942			ACQUIRE_LOCK(ump);
12943			/*
12944			 * If that cleared dependencies, go on to next.
12945			 */
12946			if (dap != LIST_FIRST(diraddhdp))
12947				continue;
12948			if (dap->da_state & MKDIR_BODY) {
12949				inodedep_lookup(UFSTOVFS(ump), inum, 0,
12950				    &inodedep);
12951				panic("flush_pagedep_deps: MKDIR_BODY "
12952				    "inodedep %p dap %p vp %p",
12953				    inodedep, dap, vp);
12954			}
12955		}
12956		/*
12957		 * Flush the inode on which the directory entry depends.
12958		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12959		 * the only remaining dependency is that the updated inode
12960		 * count must get pushed to disk. The inode has already
12961		 * been pushed into its inode buffer (via VOP_UPDATE) at
12962		 * the time of the reference count change. So we need only
12963		 * locate that buffer, ensure that there will be no rollback
12964		 * caused by a bitmap dependency, then write the inode buffer.
12965		 */
12966retry:
12967		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12968			panic("flush_pagedep_deps: lost inode");
12969		/*
12970		 * If the inode still has bitmap dependencies,
12971		 * push them to disk.
12972		 */
12973		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12974			bp = inodedep->id_bmsafemap->sm_buf;
12975			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12976			if (bp == NULL)
12977				goto retry;
12978			FREE_LOCK(ump);
12979			if ((error = bwrite(bp)) != 0)
12980				break;
12981			ACQUIRE_LOCK(ump);
12982			if (dap != LIST_FIRST(diraddhdp))
12983				continue;
12984		}
12985		/*
12986		 * If the inode is still sitting in a buffer waiting
12987		 * to be written or waiting for the link count to be
12988		 * adjusted update it here to flush it to disk.
12989		 */
12990		if (dap == LIST_FIRST(diraddhdp)) {
12991			FREE_LOCK(ump);
12992			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12993			    FFSV_FORCEINSMQ)))
12994				break;
12995			error = ffs_update(vp, 1);
12996			vput(vp);
12997			if (error)
12998				break;
12999			ACQUIRE_LOCK(ump);
13000		}
13001		/*
13002		 * If we have failed to get rid of all the dependencies
13003		 * then something is seriously wrong.
13004		 */
13005		if (dap == LIST_FIRST(diraddhdp)) {
13006			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13007			panic("flush_pagedep_deps: failed to flush "
13008			    "inodedep %p ino %ju dap %p",
13009			    inodedep, (uintmax_t)inum, dap);
13010		}
13011	}
13012	if (error)
13013		ACQUIRE_LOCK(ump);
13014	while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13015		LIST_REMOVE(dap, da_pdlist);
13016		LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13017	}
13018	return (error);
13019}
13020
13021/*
13022 * A large burst of file addition or deletion activity can drive the
13023 * memory load excessively high. First attempt to slow things down
13024 * using the techniques below. If that fails, this routine requests
13025 * the offending operations to fall back to running synchronously
13026 * until the memory load returns to a reasonable level.
13027 */
13028int
13029softdep_slowdown(vp)
13030	struct vnode *vp;
13031{
13032	struct ufsmount *ump;
13033	int jlow;
13034	int max_softdeps_hard;
13035
13036	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13037	    ("softdep_slowdown called on non-softdep filesystem"));
13038	ump = VFSTOUFS(vp->v_mount);
13039	ACQUIRE_LOCK(ump);
13040	jlow = 0;
13041	/*
13042	 * Check for journal space if needed.
13043	 */
13044	if (DOINGSUJ(vp)) {
13045		if (journal_space(ump, 0) == 0)
13046			jlow = 1;
13047	}
13048	/*
13049	 * If the system is under its limits and our filesystem is
13050	 * not responsible for more than our share of the usage and
13051	 * we are not low on journal space, then no need to slow down.
13052	 */
13053	max_softdeps_hard = max_softdeps * 11 / 10;
13054	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13055	    dep_current[D_INODEDEP] < max_softdeps_hard &&
13056	    dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13057	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13058	    ump->softdep_curdeps[D_DIRREM] <
13059	    (max_softdeps_hard / 2) / stat_flush_threads &&
13060	    ump->softdep_curdeps[D_INODEDEP] <
13061	    max_softdeps_hard / stat_flush_threads &&
13062	    ump->softdep_curdeps[D_INDIRDEP] <
13063	    (max_softdeps_hard / 1000) / stat_flush_threads &&
13064	    ump->softdep_curdeps[D_FREEBLKS] <
13065	    max_softdeps_hard / stat_flush_threads) {
13066		FREE_LOCK(ump);
13067  		return (0);
13068	}
13069	/*
13070	 * If the journal is low or our filesystem is over its limit
13071	 * then speedup the cleanup.
13072	 */
13073	if (ump->softdep_curdeps[D_INDIRDEP] <
13074	    (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13075		softdep_speedup(ump);
13076	stat_sync_limit_hit += 1;
13077	FREE_LOCK(ump);
13078	/*
13079	 * We only slow down the rate at which new dependencies are
13080	 * generated if we are not using journaling. With journaling,
13081	 * the cleanup should always be sufficient to keep things
13082	 * under control.
13083	 */
13084	if (DOINGSUJ(vp))
13085		return (0);
13086	return (1);
13087}
13088
13089/*
13090 * Called by the allocation routines when they are about to fail
13091 * in the hope that we can free up the requested resource (inodes
13092 * or disk space).
13093 *
13094 * First check to see if the work list has anything on it. If it has,
13095 * clean up entries until we successfully free the requested resource.
13096 * Because this process holds inodes locked, we cannot handle any remove
13097 * requests that might block on a locked inode as that could lead to
13098 * deadlock. If the worklist yields none of the requested resource,
13099 * start syncing out vnodes to free up the needed space.
13100 */
13101int
13102softdep_request_cleanup(fs, vp, cred, resource)
13103	struct fs *fs;
13104	struct vnode *vp;
13105	struct ucred *cred;
13106	int resource;
13107{
13108	struct ufsmount *ump;
13109	struct mount *mp;
13110	struct vnode *lvp, *mvp;
13111	long starttime;
13112	ufs2_daddr_t needed;
13113	int error;
13114
13115	/*
13116	 * If we are being called because of a process doing a
13117	 * copy-on-write, then it is not safe to process any
13118	 * worklist items as we will recurse into the copyonwrite
13119	 * routine.  This will result in an incoherent snapshot.
13120	 * If the vnode that we hold is a snapshot, we must avoid
13121	 * handling other resources that could cause deadlock.
13122	 */
13123	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13124		return (0);
13125
13126	if (resource == FLUSH_BLOCKS_WAIT)
13127		stat_cleanup_blkrequests += 1;
13128	else
13129		stat_cleanup_inorequests += 1;
13130
13131	mp = vp->v_mount;
13132	ump = VFSTOUFS(mp);
13133	mtx_assert(UFS_MTX(ump), MA_OWNED);
13134	UFS_UNLOCK(ump);
13135	error = ffs_update(vp, 1);
13136	if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13137		UFS_LOCK(ump);
13138		return (0);
13139	}
13140	/*
13141	 * If we are in need of resources, start by cleaning up
13142	 * any block removals associated with our inode.
13143	 */
13144	ACQUIRE_LOCK(ump);
13145	process_removes(vp);
13146	process_truncates(vp);
13147	FREE_LOCK(ump);
13148	/*
13149	 * Now clean up at least as many resources as we will need.
13150	 *
13151	 * When requested to clean up inodes, the number that are needed
13152	 * is set by the number of simultaneous writers (mnt_writeopcount)
13153	 * plus a bit of slop (2) in case some more writers show up while
13154	 * we are cleaning.
13155	 *
13156	 * When requested to free up space, the amount of space that
13157	 * we need is enough blocks to allocate a full-sized segment
13158	 * (fs_contigsumsize). The number of such segments that will
13159	 * be needed is set by the number of simultaneous writers
13160	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13161	 * writers show up while we are cleaning.
13162	 *
13163	 * Additionally, if we are unpriviledged and allocating space,
13164	 * we need to ensure that we clean up enough blocks to get the
13165	 * needed number of blocks over the threshhold of the minimum
13166	 * number of blocks required to be kept free by the filesystem
13167	 * (fs_minfree).
13168	 */
13169	if (resource == FLUSH_INODES_WAIT) {
13170		needed = vp->v_mount->mnt_writeopcount + 2;
13171	} else if (resource == FLUSH_BLOCKS_WAIT) {
13172		needed = (vp->v_mount->mnt_writeopcount + 2) *
13173		    fs->fs_contigsumsize;
13174		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
13175			needed += fragstoblks(fs,
13176			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13177			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
13178	} else {
13179		UFS_LOCK(ump);
13180		printf("softdep_request_cleanup: Unknown resource type %d\n",
13181		    resource);
13182		return (0);
13183	}
13184	starttime = time_second;
13185retry:
13186	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13187	    fs->fs_cstotal.cs_nbfree <= needed) ||
13188	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13189	    fs->fs_cstotal.cs_nifree <= needed)) {
13190		ACQUIRE_LOCK(ump);
13191		if (ump->softdep_on_worklist > 0 &&
13192		    process_worklist_item(UFSTOVFS(ump),
13193		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
13194			stat_worklist_push += 1;
13195		FREE_LOCK(ump);
13196	}
13197	/*
13198	 * If we still need resources and there are no more worklist
13199	 * entries to process to obtain them, we have to start flushing
13200	 * the dirty vnodes to force the release of additional requests
13201	 * to the worklist that we can then process to reap addition
13202	 * resources. We walk the vnodes associated with the mount point
13203	 * until we get the needed worklist requests that we can reap.
13204	 */
13205	if ((resource == FLUSH_BLOCKS_WAIT &&
13206	     fs->fs_cstotal.cs_nbfree <= needed) ||
13207	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13208	     fs->fs_cstotal.cs_nifree <= needed)) {
13209		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13210			if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
13211				VI_UNLOCK(lvp);
13212				continue;
13213			}
13214			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
13215			    curthread))
13216				continue;
13217			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
13218				vput(lvp);
13219				continue;
13220			}
13221			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13222			vput(lvp);
13223		}
13224		lvp = ump->um_devvp;
13225		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13226			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
13227			VOP_UNLOCK(lvp, 0);
13228		}
13229		if (ump->softdep_on_worklist > 0) {
13230			stat_cleanup_retries += 1;
13231			goto retry;
13232		}
13233		stat_cleanup_failures += 1;
13234	}
13235	if (time_second - starttime > stat_cleanup_high_delay)
13236		stat_cleanup_high_delay = time_second - starttime;
13237	UFS_LOCK(ump);
13238	return (1);
13239}
13240
13241/*
13242 * If memory utilization has gotten too high, deliberately slow things
13243 * down and speed up the I/O processing.
13244 */
13245static int
13246request_cleanup(mp, resource)
13247	struct mount *mp;
13248	int resource;
13249{
13250	struct thread *td = curthread;
13251	struct ufsmount *ump;
13252
13253	ump = VFSTOUFS(mp);
13254	LOCK_OWNED(ump);
13255	/*
13256	 * We never hold up the filesystem syncer or buf daemon.
13257	 */
13258	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
13259		return (0);
13260	/*
13261	 * First check to see if the work list has gotten backlogged.
13262	 * If it has, co-opt this process to help clean up two entries.
13263	 * Because this process may hold inodes locked, we cannot
13264	 * handle any remove requests that might block on a locked
13265	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
13266	 * to avoid recursively processing the worklist.
13267	 */
13268	if (ump->softdep_on_worklist > max_softdeps / 10) {
13269		td->td_pflags |= TDP_SOFTDEP;
13270		process_worklist_item(mp, 2, LK_NOWAIT);
13271		td->td_pflags &= ~TDP_SOFTDEP;
13272		stat_worklist_push += 2;
13273		return(1);
13274	}
13275	/*
13276	 * Next, we attempt to speed up the syncer process. If that
13277	 * is successful, then we allow the process to continue.
13278	 */
13279	if (softdep_speedup(ump) &&
13280	    resource != FLUSH_BLOCKS_WAIT &&
13281	    resource != FLUSH_INODES_WAIT)
13282		return(0);
13283	/*
13284	 * If we are resource constrained on inode dependencies, try
13285	 * flushing some dirty inodes. Otherwise, we are constrained
13286	 * by file deletions, so try accelerating flushes of directories
13287	 * with removal dependencies. We would like to do the cleanup
13288	 * here, but we probably hold an inode locked at this point and
13289	 * that might deadlock against one that we try to clean. So,
13290	 * the best that we can do is request the syncer daemon to do
13291	 * the cleanup for us.
13292	 */
13293	switch (resource) {
13294
13295	case FLUSH_INODES:
13296	case FLUSH_INODES_WAIT:
13297		ACQUIRE_GBLLOCK(&lk);
13298		stat_ino_limit_push += 1;
13299		req_clear_inodedeps += 1;
13300		FREE_GBLLOCK(&lk);
13301		stat_countp = &stat_ino_limit_hit;
13302		break;
13303
13304	case FLUSH_BLOCKS:
13305	case FLUSH_BLOCKS_WAIT:
13306		ACQUIRE_GBLLOCK(&lk);
13307		stat_blk_limit_push += 1;
13308		req_clear_remove += 1;
13309		FREE_GBLLOCK(&lk);
13310		stat_countp = &stat_blk_limit_hit;
13311		break;
13312
13313	default:
13314		panic("request_cleanup: unknown type");
13315	}
13316	/*
13317	 * Hopefully the syncer daemon will catch up and awaken us.
13318	 * We wait at most tickdelay before proceeding in any case.
13319	 */
13320	ACQUIRE_GBLLOCK(&lk);
13321	FREE_LOCK(ump);
13322	proc_waiting += 1;
13323	if (callout_pending(&softdep_callout) == FALSE)
13324		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13325		    pause_timer, 0);
13326
13327	msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13328	proc_waiting -= 1;
13329	FREE_GBLLOCK(&lk);
13330	ACQUIRE_LOCK(ump);
13331	return (1);
13332}
13333
13334/*
13335 * Awaken processes pausing in request_cleanup and clear proc_waiting
13336 * to indicate that there is no longer a timer running. Pause_timer
13337 * will be called with the global softdep mutex (&lk) locked.
13338 */
13339static void
13340pause_timer(arg)
13341	void *arg;
13342{
13343
13344	GBLLOCK_OWNED(&lk);
13345	/*
13346	 * The callout_ API has acquired mtx and will hold it around this
13347	 * function call.
13348	 */
13349	*stat_countp += proc_waiting;
13350	wakeup(&proc_waiting);
13351}
13352
13353/*
13354 * If requested, try removing inode or removal dependencies.
13355 */
13356static void
13357check_clear_deps(mp)
13358	struct mount *mp;
13359{
13360
13361	/*
13362	 * If we are suspended, it may be because of our using
13363	 * too many inodedeps, so help clear them out.
13364	 */
13365	if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended)
13366		clear_inodedeps(mp);
13367	/*
13368	 * General requests for cleanup of backed up dependencies
13369	 */
13370	ACQUIRE_GBLLOCK(&lk);
13371	if (req_clear_inodedeps) {
13372		req_clear_inodedeps -= 1;
13373		FREE_GBLLOCK(&lk);
13374		clear_inodedeps(mp);
13375		ACQUIRE_GBLLOCK(&lk);
13376		wakeup(&proc_waiting);
13377	}
13378	if (req_clear_remove) {
13379		req_clear_remove -= 1;
13380		FREE_GBLLOCK(&lk);
13381		clear_remove(mp);
13382		ACQUIRE_GBLLOCK(&lk);
13383		wakeup(&proc_waiting);
13384	}
13385	FREE_GBLLOCK(&lk);
13386}
13387
13388/*
13389 * Flush out a directory with at least one removal dependency in an effort to
13390 * reduce the number of dirrem, freefile, and freeblks dependency structures.
13391 */
13392static void
13393clear_remove(mp)
13394	struct mount *mp;
13395{
13396	struct pagedep_hashhead *pagedephd;
13397	struct pagedep *pagedep;
13398	struct ufsmount *ump;
13399	struct vnode *vp;
13400	struct bufobj *bo;
13401	int error, cnt;
13402	ino_t ino;
13403
13404	ump = VFSTOUFS(mp);
13405	LOCK_OWNED(ump);
13406
13407	for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) {
13408		pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++];
13409		if (ump->pagedep_nextclean > ump->pagedep_hash_size)
13410			ump->pagedep_nextclean = 0;
13411		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
13412			if (LIST_EMPTY(&pagedep->pd_dirremhd))
13413				continue;
13414			ino = pagedep->pd_ino;
13415			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13416				continue;
13417			FREE_LOCK(ump);
13418
13419			/*
13420			 * Let unmount clear deps
13421			 */
13422			error = vfs_busy(mp, MBF_NOWAIT);
13423			if (error != 0)
13424				goto finish_write;
13425			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13426			     FFSV_FORCEINSMQ);
13427			vfs_unbusy(mp);
13428			if (error != 0) {
13429				softdep_error("clear_remove: vget", error);
13430				goto finish_write;
13431			}
13432			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13433				softdep_error("clear_remove: fsync", error);
13434			bo = &vp->v_bufobj;
13435			BO_LOCK(bo);
13436			drain_output(vp);
13437			BO_UNLOCK(bo);
13438			vput(vp);
13439		finish_write:
13440			vn_finished_write(mp);
13441			ACQUIRE_LOCK(ump);
13442			return;
13443		}
13444	}
13445}
13446
13447/*
13448 * Clear out a block of dirty inodes in an effort to reduce
13449 * the number of inodedep dependency structures.
13450 */
13451static void
13452clear_inodedeps(mp)
13453	struct mount *mp;
13454{
13455	struct inodedep_hashhead *inodedephd;
13456	struct inodedep *inodedep;
13457	struct ufsmount *ump;
13458	struct vnode *vp;
13459	struct fs *fs;
13460	int error, cnt;
13461	ino_t firstino, lastino, ino;
13462
13463	ump = VFSTOUFS(mp);
13464	fs = ump->um_fs;
13465	LOCK_OWNED(ump);
13466	/*
13467	 * Pick a random inode dependency to be cleared.
13468	 * We will then gather up all the inodes in its block
13469	 * that have dependencies and flush them out.
13470	 */
13471	for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) {
13472		inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++];
13473		if (ump->inodedep_nextclean > ump->inodedep_hash_size)
13474			ump->inodedep_nextclean = 0;
13475		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
13476			break;
13477	}
13478	if (inodedep == NULL)
13479		return;
13480	/*
13481	 * Find the last inode in the block with dependencies.
13482	 */
13483	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
13484	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
13485		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
13486			break;
13487	/*
13488	 * Asynchronously push all but the last inode with dependencies.
13489	 * Synchronously push the last inode with dependencies to ensure
13490	 * that the inode block gets written to free up the inodedeps.
13491	 */
13492	for (ino = firstino; ino <= lastino; ino++) {
13493		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13494			continue;
13495		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13496			continue;
13497		FREE_LOCK(ump);
13498		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
13499		if (error != 0) {
13500			vn_finished_write(mp);
13501			ACQUIRE_LOCK(ump);
13502			return;
13503		}
13504		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13505		    FFSV_FORCEINSMQ)) != 0) {
13506			softdep_error("clear_inodedeps: vget", error);
13507			vfs_unbusy(mp);
13508			vn_finished_write(mp);
13509			ACQUIRE_LOCK(ump);
13510			return;
13511		}
13512		vfs_unbusy(mp);
13513		if (ino == lastino) {
13514			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
13515				softdep_error("clear_inodedeps: fsync1", error);
13516		} else {
13517			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13518				softdep_error("clear_inodedeps: fsync2", error);
13519			BO_LOCK(&vp->v_bufobj);
13520			drain_output(vp);
13521			BO_UNLOCK(&vp->v_bufobj);
13522		}
13523		vput(vp);
13524		vn_finished_write(mp);
13525		ACQUIRE_LOCK(ump);
13526	}
13527}
13528
13529void
13530softdep_buf_append(bp, wkhd)
13531	struct buf *bp;
13532	struct workhead *wkhd;
13533{
13534	struct worklist *wk;
13535	struct ufsmount *ump;
13536
13537	if ((wk = LIST_FIRST(wkhd)) == NULL)
13538		return;
13539	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13540	    ("softdep_buf_append called on non-softdep filesystem"));
13541	ump = VFSTOUFS(wk->wk_mp);
13542	ACQUIRE_LOCK(ump);
13543	while ((wk = LIST_FIRST(wkhd)) != NULL) {
13544		WORKLIST_REMOVE(wk);
13545		WORKLIST_INSERT(&bp->b_dep, wk);
13546	}
13547	FREE_LOCK(ump);
13548
13549}
13550
13551void
13552softdep_inode_append(ip, cred, wkhd)
13553	struct inode *ip;
13554	struct ucred *cred;
13555	struct workhead *wkhd;
13556{
13557	struct buf *bp;
13558	struct fs *fs;
13559	int error;
13560
13561	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
13562	    ("softdep_inode_append called on non-softdep filesystem"));
13563	fs = ip->i_fs;
13564	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
13565	    (int)fs->fs_bsize, cred, &bp);
13566	if (error) {
13567		bqrelse(bp);
13568		softdep_freework(wkhd);
13569		return;
13570	}
13571	softdep_buf_append(bp, wkhd);
13572	bqrelse(bp);
13573}
13574
13575void
13576softdep_freework(wkhd)
13577	struct workhead *wkhd;
13578{
13579	struct worklist *wk;
13580	struct ufsmount *ump;
13581
13582	if ((wk = LIST_FIRST(wkhd)) == NULL)
13583		return;
13584	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13585	    ("softdep_freework called on non-softdep filesystem"));
13586	ump = VFSTOUFS(wk->wk_mp);
13587	ACQUIRE_LOCK(ump);
13588	handle_jwork(wkhd);
13589	FREE_LOCK(ump);
13590}
13591
13592/*
13593 * Function to determine if the buffer has outstanding dependencies
13594 * that will cause a roll-back if the buffer is written. If wantcount
13595 * is set, return number of dependencies, otherwise just yes or no.
13596 */
13597static int
13598softdep_count_dependencies(bp, wantcount)
13599	struct buf *bp;
13600	int wantcount;
13601{
13602	struct worklist *wk;
13603	struct ufsmount *ump;
13604	struct bmsafemap *bmsafemap;
13605	struct freework *freework;
13606	struct inodedep *inodedep;
13607	struct indirdep *indirdep;
13608	struct freeblks *freeblks;
13609	struct allocindir *aip;
13610	struct pagedep *pagedep;
13611	struct dirrem *dirrem;
13612	struct newblk *newblk;
13613	struct mkdir *mkdir;
13614	struct diradd *dap;
13615	int i, retval;
13616
13617	retval = 0;
13618	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
13619		return (0);
13620	ump = VFSTOUFS(wk->wk_mp);
13621	ACQUIRE_LOCK(ump);
13622	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
13623		switch (wk->wk_type) {
13624
13625		case D_INODEDEP:
13626			inodedep = WK_INODEDEP(wk);
13627			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
13628				/* bitmap allocation dependency */
13629				retval += 1;
13630				if (!wantcount)
13631					goto out;
13632			}
13633			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
13634				/* direct block pointer dependency */
13635				retval += 1;
13636				if (!wantcount)
13637					goto out;
13638			}
13639			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
13640				/* direct block pointer dependency */
13641				retval += 1;
13642				if (!wantcount)
13643					goto out;
13644			}
13645			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
13646				/* Add reference dependency. */
13647				retval += 1;
13648				if (!wantcount)
13649					goto out;
13650			}
13651			continue;
13652
13653		case D_INDIRDEP:
13654			indirdep = WK_INDIRDEP(wk);
13655
13656			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
13657				/* indirect truncation dependency */
13658				retval += 1;
13659				if (!wantcount)
13660					goto out;
13661			}
13662
13663			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13664				/* indirect block pointer dependency */
13665				retval += 1;
13666				if (!wantcount)
13667					goto out;
13668			}
13669			continue;
13670
13671		case D_PAGEDEP:
13672			pagedep = WK_PAGEDEP(wk);
13673			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
13674				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
13675					/* Journal remove ref dependency. */
13676					retval += 1;
13677					if (!wantcount)
13678						goto out;
13679				}
13680			}
13681			for (i = 0; i < DAHASHSZ; i++) {
13682
13683				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
13684					/* directory entry dependency */
13685					retval += 1;
13686					if (!wantcount)
13687						goto out;
13688				}
13689			}
13690			continue;
13691
13692		case D_BMSAFEMAP:
13693			bmsafemap = WK_BMSAFEMAP(wk);
13694			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
13695				/* Add reference dependency. */
13696				retval += 1;
13697				if (!wantcount)
13698					goto out;
13699			}
13700			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
13701				/* Allocate block dependency. */
13702				retval += 1;
13703				if (!wantcount)
13704					goto out;
13705			}
13706			continue;
13707
13708		case D_FREEBLKS:
13709			freeblks = WK_FREEBLKS(wk);
13710			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
13711				/* Freeblk journal dependency. */
13712				retval += 1;
13713				if (!wantcount)
13714					goto out;
13715			}
13716			continue;
13717
13718		case D_ALLOCDIRECT:
13719		case D_ALLOCINDIR:
13720			newblk = WK_NEWBLK(wk);
13721			if (newblk->nb_jnewblk) {
13722				/* Journal allocate dependency. */
13723				retval += 1;
13724				if (!wantcount)
13725					goto out;
13726			}
13727			continue;
13728
13729		case D_MKDIR:
13730			mkdir = WK_MKDIR(wk);
13731			if (mkdir->md_jaddref) {
13732				/* Journal reference dependency. */
13733				retval += 1;
13734				if (!wantcount)
13735					goto out;
13736			}
13737			continue;
13738
13739		case D_FREEWORK:
13740		case D_FREEDEP:
13741		case D_JSEGDEP:
13742		case D_JSEG:
13743		case D_SBDEP:
13744			/* never a dependency on these blocks */
13745			continue;
13746
13747		default:
13748			panic("softdep_count_dependencies: Unexpected type %s",
13749			    TYPENAME(wk->wk_type));
13750			/* NOTREACHED */
13751		}
13752	}
13753out:
13754	FREE_LOCK(ump);
13755	return retval;
13756}
13757
13758/*
13759 * Acquire exclusive access to a buffer.
13760 * Must be called with a locked mtx parameter.
13761 * Return acquired buffer or NULL on failure.
13762 */
13763static struct buf *
13764getdirtybuf(bp, lock, waitfor)
13765	struct buf *bp;
13766	struct rwlock *lock;
13767	int waitfor;
13768{
13769	int error;
13770
13771	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13772		if (waitfor != MNT_WAIT)
13773			return (NULL);
13774		error = BUF_LOCK(bp,
13775		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
13776		/*
13777		 * Even if we sucessfully acquire bp here, we have dropped
13778		 * lock, which may violates our guarantee.
13779		 */
13780		if (error == 0)
13781			BUF_UNLOCK(bp);
13782		else if (error != ENOLCK)
13783			panic("getdirtybuf: inconsistent lock: %d", error);
13784		rw_wlock(lock);
13785		return (NULL);
13786	}
13787	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13788		if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
13789			rw_wunlock(lock);
13790			BO_LOCK(bp->b_bufobj);
13791			BUF_UNLOCK(bp);
13792			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13793				bp->b_vflags |= BV_BKGRDWAIT;
13794				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
13795				       PRIBIO | PDROP, "getbuf", 0);
13796			} else
13797				BO_UNLOCK(bp->b_bufobj);
13798			rw_wlock(lock);
13799			return (NULL);
13800		}
13801		BUF_UNLOCK(bp);
13802		if (waitfor != MNT_WAIT)
13803			return (NULL);
13804		/*
13805		 * The lock argument must be bp->b_vp's mutex in
13806		 * this case.
13807		 */
13808#ifdef	DEBUG_VFS_LOCKS
13809		if (bp->b_vp->v_type != VCHR)
13810			ASSERT_BO_WLOCKED(bp->b_bufobj);
13811#endif
13812		bp->b_vflags |= BV_BKGRDWAIT;
13813		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
13814		return (NULL);
13815	}
13816	if ((bp->b_flags & B_DELWRI) == 0) {
13817		BUF_UNLOCK(bp);
13818		return (NULL);
13819	}
13820	bremfree(bp);
13821	return (bp);
13822}
13823
13824
13825/*
13826 * Check if it is safe to suspend the file system now.  On entry,
13827 * the vnode interlock for devvp should be held.  Return 0 with
13828 * the mount interlock held if the file system can be suspended now,
13829 * otherwise return EAGAIN with the mount interlock held.
13830 */
13831int
13832softdep_check_suspend(struct mount *mp,
13833		      struct vnode *devvp,
13834		      int softdep_depcnt,
13835		      int softdep_accdepcnt,
13836		      int secondary_writes,
13837		      int secondary_accwrites)
13838{
13839	struct bufobj *bo;
13840	struct ufsmount *ump;
13841	int error;
13842
13843	bo = &devvp->v_bufobj;
13844	ASSERT_BO_WLOCKED(bo);
13845
13846	/*
13847	 * If we are not running with soft updates, then we need only
13848	 * deal with secondary writes as we try to suspend.
13849	 */
13850	if (MOUNTEDSOFTDEP(mp) == 0) {
13851		MNT_ILOCK(mp);
13852		while (mp->mnt_secondary_writes != 0) {
13853			BO_UNLOCK(bo);
13854			msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
13855			    (PUSER - 1) | PDROP, "secwr", 0);
13856			BO_LOCK(bo);
13857			MNT_ILOCK(mp);
13858		}
13859
13860		/*
13861		 * Reasons for needing more work before suspend:
13862		 * - Dirty buffers on devvp.
13863		 * - Secondary writes occurred after start of vnode sync loop
13864		 */
13865		error = 0;
13866		if (bo->bo_numoutput > 0 ||
13867		    bo->bo_dirty.bv_cnt > 0 ||
13868		    secondary_writes != 0 ||
13869		    mp->mnt_secondary_writes != 0 ||
13870		    secondary_accwrites != mp->mnt_secondary_accwrites)
13871			error = EAGAIN;
13872		BO_UNLOCK(bo);
13873		return (error);
13874	}
13875
13876	/*
13877	 * If we are running with soft updates, then we need to coordinate
13878	 * with them as we try to suspend.
13879	 */
13880	ump = VFSTOUFS(mp);
13881	for (;;) {
13882		if (!TRY_ACQUIRE_LOCK(ump)) {
13883			BO_UNLOCK(bo);
13884			ACQUIRE_LOCK(ump);
13885			FREE_LOCK(ump);
13886			BO_LOCK(bo);
13887			continue;
13888		}
13889		MNT_ILOCK(mp);
13890		if (mp->mnt_secondary_writes != 0) {
13891			FREE_LOCK(ump);
13892			BO_UNLOCK(bo);
13893			msleep(&mp->mnt_secondary_writes,
13894			       MNT_MTX(mp),
13895			       (PUSER - 1) | PDROP, "secwr", 0);
13896			BO_LOCK(bo);
13897			continue;
13898		}
13899		break;
13900	}
13901
13902	/*
13903	 * Reasons for needing more work before suspend:
13904	 * - Dirty buffers on devvp.
13905	 * - Softdep activity occurred after start of vnode sync loop
13906	 * - Secondary writes occurred after start of vnode sync loop
13907	 */
13908	error = 0;
13909	if (bo->bo_numoutput > 0 ||
13910	    bo->bo_dirty.bv_cnt > 0 ||
13911	    softdep_depcnt != 0 ||
13912	    ump->softdep_deps != 0 ||
13913	    softdep_accdepcnt != ump->softdep_accdeps ||
13914	    secondary_writes != 0 ||
13915	    mp->mnt_secondary_writes != 0 ||
13916	    secondary_accwrites != mp->mnt_secondary_accwrites)
13917		error = EAGAIN;
13918	FREE_LOCK(ump);
13919	BO_UNLOCK(bo);
13920	return (error);
13921}
13922
13923
13924/*
13925 * Get the number of dependency structures for the file system, both
13926 * the current number and the total number allocated.  These will
13927 * later be used to detect that softdep processing has occurred.
13928 */
13929void
13930softdep_get_depcounts(struct mount *mp,
13931		      int *softdep_depsp,
13932		      int *softdep_accdepsp)
13933{
13934	struct ufsmount *ump;
13935
13936	if (MOUNTEDSOFTDEP(mp) == 0) {
13937		*softdep_depsp = 0;
13938		*softdep_accdepsp = 0;
13939		return;
13940	}
13941	ump = VFSTOUFS(mp);
13942	ACQUIRE_LOCK(ump);
13943	*softdep_depsp = ump->softdep_deps;
13944	*softdep_accdepsp = ump->softdep_accdeps;
13945	FREE_LOCK(ump);
13946}
13947
13948/*
13949 * Wait for pending output on a vnode to complete.
13950 * Must be called with vnode lock and interlock locked.
13951 *
13952 * XXX: Should just be a call to bufobj_wwait().
13953 */
13954static void
13955drain_output(vp)
13956	struct vnode *vp;
13957{
13958	struct bufobj *bo;
13959
13960	bo = &vp->v_bufobj;
13961	ASSERT_VOP_LOCKED(vp, "drain_output");
13962	ASSERT_BO_WLOCKED(bo);
13963
13964	while (bo->bo_numoutput) {
13965		bo->bo_flag |= BO_WWAIT;
13966		msleep((caddr_t)&bo->bo_numoutput,
13967		    BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0);
13968	}
13969}
13970
13971/*
13972 * Called whenever a buffer that is being invalidated or reallocated
13973 * contains dependencies. This should only happen if an I/O error has
13974 * occurred. The routine is called with the buffer locked.
13975 */
13976static void
13977softdep_deallocate_dependencies(bp)
13978	struct buf *bp;
13979{
13980
13981	if ((bp->b_ioflags & BIO_ERROR) == 0)
13982		panic("softdep_deallocate_dependencies: dangling deps");
13983	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
13984		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
13985	else
13986		printf("softdep_deallocate_dependencies: "
13987		    "got error %d while accessing filesystem\n", bp->b_error);
13988	if (bp->b_error != ENXIO)
13989		panic("softdep_deallocate_dependencies: unrecovered I/O error");
13990}
13991
13992/*
13993 * Function to handle asynchronous write errors in the filesystem.
13994 */
13995static void
13996softdep_error(func, error)
13997	char *func;
13998	int error;
13999{
14000
14001	/* XXX should do something better! */
14002	printf("%s: got error %d while accessing filesystem\n", func, error);
14003}
14004
14005#ifdef DDB
14006
14007static void
14008inodedep_print(struct inodedep *inodedep, int verbose)
14009{
14010	db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
14011	    " saveino %p\n",
14012	    inodedep, inodedep->id_fs, inodedep->id_state,
14013	    (intmax_t)inodedep->id_ino,
14014	    (intmax_t)fsbtodb(inodedep->id_fs,
14015	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
14016	    inodedep->id_nlinkdelta, inodedep->id_savednlink,
14017	    inodedep->id_savedino1);
14018
14019	if (verbose == 0)
14020		return;
14021
14022	db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
14023	    "mkdiradd %p\n",
14024	    LIST_FIRST(&inodedep->id_pendinghd),
14025	    LIST_FIRST(&inodedep->id_bufwait),
14026	    LIST_FIRST(&inodedep->id_inowait),
14027	    TAILQ_FIRST(&inodedep->id_inoreflst),
14028	    inodedep->id_mkdiradd);
14029	db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
14030	    TAILQ_FIRST(&inodedep->id_inoupdt),
14031	    TAILQ_FIRST(&inodedep->id_newinoupdt),
14032	    TAILQ_FIRST(&inodedep->id_extupdt),
14033	    TAILQ_FIRST(&inodedep->id_newextupdt));
14034}
14035
14036DB_SHOW_COMMAND(inodedep, db_show_inodedep)
14037{
14038
14039	if (have_addr == 0) {
14040		db_printf("Address required\n");
14041		return;
14042	}
14043	inodedep_print((struct inodedep*)addr, 1);
14044}
14045
14046DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
14047{
14048	struct inodedep_hashhead *inodedephd;
14049	struct inodedep *inodedep;
14050	struct ufsmount *ump;
14051	int cnt;
14052
14053	if (have_addr == 0) {
14054		db_printf("Address required\n");
14055		return;
14056	}
14057	ump = (struct ufsmount *)addr;
14058	for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) {
14059		inodedephd = &ump->inodedep_hashtbl[cnt];
14060		LIST_FOREACH(inodedep, inodedephd, id_hash) {
14061			inodedep_print(inodedep, 0);
14062		}
14063	}
14064}
14065
14066DB_SHOW_COMMAND(worklist, db_show_worklist)
14067{
14068	struct worklist *wk;
14069
14070	if (have_addr == 0) {
14071		db_printf("Address required\n");
14072		return;
14073	}
14074	wk = (struct worklist *)addr;
14075	printf("worklist: %p type %s state 0x%X\n",
14076	    wk, TYPENAME(wk->wk_type), wk->wk_state);
14077}
14078
14079DB_SHOW_COMMAND(workhead, db_show_workhead)
14080{
14081	struct workhead *wkhd;
14082	struct worklist *wk;
14083	int i;
14084
14085	if (have_addr == 0) {
14086		db_printf("Address required\n");
14087		return;
14088	}
14089	wkhd = (struct workhead *)addr;
14090	wk = LIST_FIRST(wkhd);
14091	for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
14092		db_printf("worklist: %p type %s state 0x%X",
14093		    wk, TYPENAME(wk->wk_type), wk->wk_state);
14094	if (i == 100)
14095		db_printf("workhead overflow");
14096	printf("\n");
14097}
14098
14099
14100DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
14101{
14102	struct mkdirlist *mkdirlisthd;
14103	struct jaddref *jaddref;
14104	struct diradd *diradd;
14105	struct mkdir *mkdir;
14106
14107	if (have_addr == 0) {
14108		db_printf("Address required\n");
14109		return;
14110	}
14111	mkdirlisthd = (struct mkdirlist *)addr;
14112	LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
14113		diradd = mkdir->md_diradd;
14114		db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
14115		    mkdir, mkdir->md_state, diradd, diradd->da_state);
14116		if ((jaddref = mkdir->md_jaddref) != NULL)
14117			db_printf(" jaddref %p jaddref state 0x%X",
14118			    jaddref, jaddref->ja_state);
14119		db_printf("\n");
14120	}
14121}
14122
14123/* exported to ffs_vfsops.c */
14124extern void db_print_ffs(struct ufsmount *ump);
14125void
14126db_print_ffs(struct ufsmount *ump)
14127{
14128	db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
14129	    ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
14130	    ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
14131	    ump->softdep_deps, ump->softdep_req);
14132}
14133
14134#endif /* DDB */
14135
14136#endif /* SOFTUPDATES */
14137