/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_softdep.c 284375 2015-06-14 05:12:48Z kib $");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

/*
 * For now we want the safety net that the DEBUG flag provides.
 */
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0	/* Define to KTR_SPARE. */
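
/*
 * Tracing note: with KTR_SUJ defined to 0, the SUJ CTR tracepoints used
 * later in this file never match a KTR class and so never fire.
 * Redefining it to a spare class (e.g. KTR_SPARE, as the comment above
 * suggests) enables them on kernels built with "options KTR".
 */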

#ifndef SOFTUPDATES

int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

	panic("softdep_unmount called");
}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{

	panic("softdep_setup_sbupdate called");
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

	panic("softdep_change_directoryentry_offset called");
}

void
softdep_setup_remove(bp, dp, ip, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	int isrmdir;
{

	panic("softdep_setup_remove called");
}

void
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	ino_t newinum;
	int isrmdir;
{

	panic("softdep_setup_directory_change called");
}

void
softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
	struct mount *mp;
	struct buf *bp;
	ufs2_daddr_t blkno;
	int frags;
	struct workhead *wkhd;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_inofree(mp, bp, ino, wkhd)
	struct mount *mp;
	struct buf *bp;
	ino_t ino;
	struct workhead *wkhd;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_unlink(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_rmdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_rmdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_create(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_create(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_mkdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_mkdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_dotdot_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

int
softdep_prealloc(vp, waitok)
	struct vnode *vp;
	int waitok;
{

	panic("%s called", __FUNCTION__);
}

int
softdep_journal_lookup(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{

	return (ENOENT);
}

void
softdep_change_linkcnt(ip)
	struct inode *ip;
{

	panic("softdep_change_linkcnt called");
}

void
softdep_load_inodeblock(ip)
	struct inode *ip;
{

	panic("softdep_load_inodeblock called");
}

void
softdep_update_inodeblock(ip, bp, waitfor)
	struct inode *ip;
	struct buf *bp;
	int waitfor;
{

	panic("softdep_update_inodeblock called");
}

int
softdep_fsync(vp)
	struct vnode *vp;	/* the "in_core" copy of the inode */
{

	return (0);
}

void
softdep_fsync_mountdev(vp)
	struct vnode *vp;
{

	return;
}

int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{

	*countp = 0;
	return (0);
}

int
softdep_sync_metadata(struct vnode *vp)
{

	panic("softdep_sync_metadata called");
}

int
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{

	panic("softdep_sync_buf called");
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_depcnt,
		      int softdep_accdepcnt,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_depcnt;
	(void) softdep_accdepcnt;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_append called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_append called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");

static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
    "high use dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
    "current dependencies written");

unsigned long dep_current[D_LAST + 1];
unsigned long dep_highuse[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];

#define	SOFTDEP_TYPE(type, str, long)					\
    static MALLOC_DEFINE(M_ ## type, #str, long);			\
    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_total[D_ ## type], 0, "");					\
    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_current[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_highuse[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_write[D_ ## type], 0, "");

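/*
 * The per-type counters wired up below are exported read-only through
 * sysctl(8), so they can be inspected from userland; for example
 * (illustrative):
 *
 *	sysctl debug.softdep.current.inodedep
 */
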
SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
    "Block or frag allocated from cyl group map");
SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");

static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");

static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data");

#define M_SOFTDEP_FLAGS	(M_WAITOK)

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_BMSAFEMAP,
	M_NEWBLK,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK,
	M_FREEWORK,
	M_FREEDEP,
	M_JADDREF,
	M_JREMREF,
	M_JMVREF,
	M_JNEWBLK,
	M_JFREEBLK,
	M_JFREEFRAG,
	M_JSEG,
	M_JSEGDEP,
	M_SBDEP,
	M_JTRUNC,
	M_JFSYNC,
	M_SENTINEL
};

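/*
 * A compile-time check along the lines of the following sketch would
 * enforce the correspondence noted above (it assumes D_LAST indexes the
 * last entry, M_SENTINEL):
 *
 *	CTASSERT(nitems(memtype) == D_LAST + 1);
 */
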
#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 */
#define TYPENAME(type)  \
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */

#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)

/*
 * Internal function prototypes.
 */
static	void check_clear_deps(struct mount *);
static	void softdep_error(char *, int);
static	int softdep_process_worklist(struct mount *, int);
static	int softdep_waitidle(struct mount *, int);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
static	int check_inodedep_free(struct inodedep *);
static	void clear_remove(struct mount *);
static	void clear_inodedeps(struct mount *);
static	void unlinked_inodedep(struct mount *, struct inodedep *);
static	void clear_unlinked_inodedep(struct inodedep *);
static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int free_pagedep(struct pagedep *);
static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int sync_cgs(struct mount *, int);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	int handle_written_sbdep(struct sbdep *, struct buf *);
static	void initiate_write_sbdep(struct sbdep *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_indirdep(struct indirdep *, struct buf *,
	    struct buf **);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
static	void handle_written_jaddref(struct jaddref *);
static	void handle_written_jremref(struct jremref *);
static	void handle_written_jseg(struct jseg *, struct buf *);
static	void handle_written_jnewblk(struct jnewblk *);
static	void handle_written_jblkdep(struct jblkdep *);
static	void handle_written_jfreefrag(struct jfreefrag *);
static	void complete_jseg(struct jseg *);
static	void complete_jsegs(struct jseg *);
static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static	inline void inoref_write(struct inoref *, struct jseg *,
	    struct jrefrec *);
static	void handle_allocdirect_partdone(struct allocdirect *,
	    struct workhead *);
static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
	    struct workhead *);
static	void indirdep_complete(struct indirdep *);
static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
static	void indirblk_insert(struct freework *);
static	void indirblk_remove(struct freework *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void initiate_write_indirdep(struct indirdep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	    struct buf *);
static	void cancel_indirdep(struct indirdep *, struct buf *,
	    struct freeblks *);
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
	    struct jremref *);
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
	    int);
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
	    struct inodedep *);
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
	    struct workhead *);
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
	    int, int);
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
	    ufs_lbn_t, int);
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	    int, int);
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk *);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	    struct workhead *);
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	    ufs_lbn_t);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
	    struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	void schedule_cleanup(struct mount *);
static	void softdep_ast_cleanup_proc(void);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void *);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(struct ufsmount *);
static	void worklist_speedup(struct mount *);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct ufsmount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	bool softdep_excess_inodes(struct ufsmount *);
static	bool softdep_excess_dirrem(struct ufsmount *);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	    uint16_t);
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	    uint16_t);
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	    ufs2_daddr_t, int);
static	void adjust_newfreework(struct freeblks *, int);
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	    struct mkdir **);
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

/*
 * Global lock over all of soft updates.
 */
static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF);

#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)

/*
 * Per-filesystem soft-updates locking.
 */
#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
				    RA_WLOCKED)

#define	BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
#define	BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)

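/*
 * Typical use of the per-filesystem lock (an illustrative sketch):
 *
 *	ump = VFSTOUFS(mp);
 *	ACQUIRE_LOCK(ump);
 *	... modify this mount's soft updates state ...
 *	FREE_LOCK(ump);
 */
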
/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *, int);
static	void worklist_remove(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)

static void
worklist_insert(head, item, locked)
	struct workhead *head;
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item, locked)
	struct worklist *item;
	int locked;
{

	if (locked)
		LOCK_OWNED(VFSTOUFS(item->wk_mp));
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}
#endif /* DEBUG */

/*
 * Merge two jsegdeps keeping only the oldest one as newer references
 * can't be discarded until after older references.
 */
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
{
	struct jsegdep *swp;

	if (two == NULL)
		return (one);

	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
		swp = one;
		one = two;
		two = swp;
	}
	WORKLIST_REMOVE(&two->jd_list);
	free_jsegdep(two);

	return (one);
}

/*
 * If two freedeps are compatible free one to reduce list size.
 */
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
{
	if (two == NULL)
		return (one);

	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
		free_freedep(two);
	}
	return (one);
}

/*
 * Move journal work from one list to another.  Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
 */
static void
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
{
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;

	KASSERT(dst != src,
	    ("jwork_move: dst == src"));
	freedep = NULL;
	jsegdep = NULL;
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}

	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
			continue;
		}
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}
}

static void
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
{
	struct jsegdep *jsegdepn;
	struct worklist *wk;

	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
			break;
	if (wk == NULL) {
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		return;
	}
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	} else
		free_jsegdep(jsegdep);
}

/*
 * Routines for tracking and managing workitems.
 */
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);
static	void workitem_reassign(struct worklist *, int);

#define	WORKITEM_FREE(item, type) \
	workitem_free((struct worklist *)(item), (type))
#define	WORKITEM_REASSIGN(item, type) \
	workitem_reassign((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;

#ifdef DEBUG
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list",
		    TYPENAME(item->wk_type), item->wk_state);
	if (item->wk_type != type && type != D_NEWBLK)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
#endif
	if (item->wk_state & IOWAITING)
		wakeup(item);
	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_deps > 0,
	    ("workitem_free: %s: softdep_deps going negative",
	    ump->um_fs->fs_fsmnt));
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_free: %s: dep_current[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	atomic_subtract_long(&dep_current[item->wk_type], 1);
	ump->softdep_curdeps[item->wk_type] -= 1;
	free(item, DtoM(type));
}

static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	struct ufsmount *ump;

	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;

	ump = VFSTOUFS(mp);
	ACQUIRE_GBLLOCK(&lk);
	dep_current[type]++;
	if (dep_current[type] > dep_highuse[type])
		dep_highuse[type] = dep_current[type];
	dep_total[type]++;
	FREE_GBLLOCK(&lk);
	ACQUIRE_LOCK(ump);
	ump->softdep_curdeps[type] += 1;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
	FREE_LOCK(ump);
}

static void
workitem_reassign(item, newtype)
	struct worklist *item;
	int newtype;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(item->wk_mp);
	LOCK_OWNED(ump);
	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ump->softdep_curdeps[item->wk_type] -= 1;
	ump->softdep_curdeps[newtype] += 1;
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_reassign: %s: dep_current[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	ACQUIRE_GBLLOCK(&lk);
	dep_current[newtype]++;
	dep_current[item->wk_type]--;
	if (dep_current[newtype] > dep_highuse[newtype])
		dep_highuse[newtype] = dep_current[newtype];
	dep_total[newtype]++;
	FREE_GBLLOCK(&lk);
	item->wk_type = newtype;
}

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_flush_threads;	/* number of softdep flushing threads */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */
static int stat_emptyjblocks; /* Number of potentially empty journal blocks */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
    &stat_flush_threads, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
    &stat_emptyjblocks, 0, "");

SYSCTL_DECL(_vfs_ffs);

/* Whether to recompute the summary at mount time */
static int compute_summary_at_mount = 0;
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
	   &compute_summary_at_mount, 0, "Recompute summary at mount");
static int print_threads = 0;
SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
    &print_threads, 0, "Notify flusher thread start/stop");

/* List of all filesystems mounted with soft updates */
static TAILQ_HEAD(, mount_softdeps) softdepmounts;

/*
 * This function cleans the worklist for a filesystem.
 * Each filesystem running with soft dependencies gets its own
 * thread to run in this function. The thread is started up in
 * softdep_mount and shut down in softdep_unmount. They show up
 * as part of the kernel "bufdaemon" process whose process
 * entry is available in bufdaemonproc.
 */
static int searchfailed;
extern struct proc *bufdaemonproc;
static void
softdep_flush(addr)
	void *addr;
{
	struct mount *mp;
	struct thread *td;
	struct ufsmount *ump;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;
	mp = (struct mount *)addr;
	ump = VFSTOUFS(mp);
	atomic_add_int(&stat_flush_threads, 1);
	ACQUIRE_LOCK(ump);
	ump->softdep_flags &= ~FLUSH_STARTING;
	wakeup(&ump->softdep_flushtd);
	FREE_LOCK(ump);
	if (print_threads) {
		if (stat_flush_threads == 1)
			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
			    bufdaemonproc->p_pid);
		printf("Start thread %s\n", td->td_name);
	}
	for (;;) {
		while (softdep_process_worklist(mp, 0) > 0 ||
		    (MOUNTEDSUJ(mp) &&
		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
			kthread_suspend_check();
		ACQUIRE_LOCK(ump);
		if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
			    "sdflush", hz / 2);
		ump->softdep_flags &= ~FLUSH_CLEANUP;
		/*
		 * Check to see if we are done and need to exit.
		 */
		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
			FREE_LOCK(ump);
			continue;
		}
		ump->softdep_flags &= ~FLUSH_EXIT;
		FREE_LOCK(ump);
		wakeup(&ump->softdep_flags);
		if (print_threads)
			printf("Stop thread %s: searchfailed %d, "
			    "did cleanups %d\n", td->td_name, searchfailed,
			    ump->um_softdep->sd_cleanups);
		atomic_subtract_int(&stat_flush_threads, 1);
		kthread_exit();
		panic("kthread_exit failed\n");
	}
}

static void
worklist_speedup(mp)
	struct mount *mp;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
		ump->softdep_flags |= FLUSH_CLEANUP;
	wakeup(&ump->softdep_flushtd);
}

static int
softdep_speedup(ump)
	struct ufsmount *ump;
{
	struct ufsmount *altump;
	struct mount_softdeps *sdp;

	LOCK_OWNED(ump);
	worklist_speedup(ump->um_mountp);
	bd_speedup();
	/*
	 * If we have global shortages, then we need other
	 * filesystems to help with the cleanup. Here we wake up a
	 * flusher thread for a filesystem that is over its fair
	 * share of resources.
	 */
	if (req_clear_inodedeps || req_clear_remove) {
		ACQUIRE_GBLLOCK(&lk);
		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
			if ((altump = sdp->sd_ump) == ump)
				continue;
			if (((req_clear_inodedeps &&
			    altump->softdep_curdeps[D_INODEDEP] >
			    max_softdeps / stat_flush_threads) ||
			    (req_clear_remove &&
			    altump->softdep_curdeps[D_DIRREM] >
			    (max_softdeps / 2) / stat_flush_threads)) &&
			    TRY_ACQUIRE_LOCK(altump))
				break;
		}
		if (sdp == NULL) {
			searchfailed++;
			FREE_GBLLOCK(&lk);
		} else {
			/*
			 * Move to the end of the list so we pick a
			 * different one on our next try.
			 */
			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
			FREE_GBLLOCK(&lk);
			if ((altump->softdep_flags &
			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
				altump->softdep_flags |= FLUSH_CLEANUP;
			altump->um_softdep->sd_cleanups++;
			wakeup(&altump->softdep_flushtd);
			FREE_LOCK(altump);
		}
	}
	return (speedup_syncer());
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */

#define	WK_HEAD		0x0001	/* Add to HEAD. */
#define	WK_NODELAY	0x0002	/* Process immediately. */

static void
add_to_worklist(wk, flags)
	struct worklist *wk;
	int flags;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	LOCK_OWNED(ump);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	} else {
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	}
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
		worklist_speedup(wk->wk_mp);
}

/*
 * Remove the item to be processed. If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
 */
static void
remove_from_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	WORKLIST_REMOVE(wk);
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	ump->softdep_on_worklist -= 1;
}

static void
wake_worklist(wk)
	struct worklist *wk;
{
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
		wakeup(wk);
	}
}

static void
wait_worklist(wk, wmesg)
	struct worklist *wk;
	char *wmesg;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	wk->wk_state |= IOWAITING;
	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
static int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	int cnt, matchcnt;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	if (MOUNTEDSOFTDEP(mp) == 0)
		return (0);
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(ump);
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	check_clear_deps(mp);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
			break;
		else
			matchcnt += cnt;
		check_clear_deps(mp);
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (should_yield()) {
			FREE_LOCK(ump);
			kern_yield(PRI_USER);
			bwillwrite();
			ACQUIRE_LOCK(ump);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. This gives the syncer thread the opportunity
		 * to pause if appropriate.
		 */
		if (!full && starttime != time_second)
			break;
	}
	if (full == 0)
		journal_unsuspend(ump);
	FREE_LOCK(ump);
	return (matchcnt);
}

/*
 * Process all removes associated with a vnode if we are running out of
 * journal space.  Any other process that attempts to flush these will
 * be unable to do so, as we hold the vnodes locked.
 */
static void
process_removes(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
top:
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			/*
			 * If another thread is trying to lock this vnode
			 * it will fail but we must wait for it to do so
			 * before we can proceed.
			 */
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
				goto top;
			}
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
				break;
		}
		if (dirrem == NULL)
			return;
		remove_from_worklist(&dirrem->dm_list);
		FREE_LOCK(ump);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(ump);
	}
}

/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space.  This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct ufsmount *ump;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	mp = vp->v_mount;
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written.  */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(ump);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(ump);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(ump);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(ump);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(ump);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(ump);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	PHOLD(curproc);	/* Don't let the stack go away. */
	ump = VFSTOUFS(mp);
	LOCK_OWNED(ump);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
1777	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
1778	    wk = LIST_NEXT(&sentinel, wk_list)) {
1779		if (wk->wk_type == D_SENTINEL) {
1780			LIST_REMOVE(&sentinel, wk_list);
1781			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
1782			continue;
1783		}
1784		if (wk->wk_state & INPROGRESS)
1785			panic("process_worklist_item: %p already in progress.",
1786			    wk);
1787		wk->wk_state |= INPROGRESS;
1788		remove_from_worklist(wk);
1789		FREE_LOCK(ump);
1790		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1791			panic("process_worklist_item: suspended filesystem");
1792		switch (wk->wk_type) {
1793		case D_DIRREM:
1794			/* removal of a directory entry */
1795			error = handle_workitem_remove(WK_DIRREM(wk), flags);
1796			break;
1797
1798		case D_FREEBLKS:
1799			/* releasing blocks and/or fragments from a file */
1800			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
1801			    flags);
1802			break;
1803
1804		case D_FREEFRAG:
1805			/* releasing a fragment when replaced as a file grows */
1806			handle_workitem_freefrag(WK_FREEFRAG(wk));
1807			error = 0;
1808			break;
1809
1810		case D_FREEFILE:
1811			/* releasing an inode when its link count drops to 0 */
1812			handle_workitem_freefile(WK_FREEFILE(wk));
1813			error = 0;
1814			break;
1815
1816		default:
1817			panic("%s_process_worklist: Unknown type %s",
1818			    "softdep", TYPENAME(wk->wk_type));
1819			/* NOTREACHED */
1820		}
1821		vn_finished_secondary_write(mp);
1822		ACQUIRE_LOCK(ump);
1823		if (error == 0) {
1824			if (++matchcnt == target)
1825				break;
1826			continue;
1827		}
1828		/*
1829		 * We have to retry the worklist item later.  Wake up any
1830		 * waiters who may be able to complete it immediately and
1831		 * add the item back to the head so we don't try to execute
1832		 * it again.
1833		 */
1834		wk->wk_state &= ~INPROGRESS;
1835		wake_worklist(wk);
1836		add_to_worklist(wk, WK_HEAD);
1837	}
1838	LIST_REMOVE(&sentinel, wk_list);
1839	/* Sentinel could've become the tail from remove_from_worklist. */
1840	if (ump->softdep_worklist_tail == &sentinel)
1841		ump->softdep_worklist_tail =
1842		    (struct worklist *)sentinel.wk_list.le_prev;
1843	PRELE(curproc);
1844	return (matchcnt);
1845}
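
/*
 * The sentinel traversal above is worth calling out: because the lock
 * is dropped around each handler, a plain LIST_FOREACH() could be left
 * holding a stale pointer.  Instead a sentinel element stays linked
 * into the list to mark our position, and other threads' sentinels are
 * simply hopped over.  A simplified, hypothetical sketch (helper names
 * are illustrative):
 */
#if 0
	struct worklist sentinel, *wk;

	sentinel.wk_type = D_SENTINEL;		/* never processed */
	LIST_INSERT_HEAD(&head, &sentinel, wk_list);
	while ((wk = LIST_NEXT(&sentinel, wk_list)) != NULL) {
		if (wk->wk_type == D_SENTINEL) {
			/* Hop over another thread's place-holder. */
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		remove_from_worklist(wk);	/* LIST_NEXT(&sentinel) advances */
		FREE_LOCK(ump);
		handle_item(wk);		/* may sleep */
		ACQUIRE_LOCK(ump);
	}
	LIST_REMOVE(&sentinel, wk_list);
#endif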
1846
1847/*
1848 * Move dependencies from one buffer to another.
1849 */
1850int
1851softdep_move_dependencies(oldbp, newbp)
1852	struct buf *oldbp;
1853	struct buf *newbp;
1854{
1855	struct worklist *wk, *wktail;
1856	struct ufsmount *ump;
1857	int dirty;
1858
1859	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
1860		return (0);
1861	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
1862	    ("softdep_move_dependencies called on non-softdep filesystem"));
1863	dirty = 0;
1864	wktail = NULL;
1865	ump = VFSTOUFS(wk->wk_mp);
1866	ACQUIRE_LOCK(ump);
1867	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
1868		LIST_REMOVE(wk, wk_list);
1869		if (wk->wk_type == D_BMSAFEMAP &&
1870		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
1871			dirty = 1;
1872		if (wktail == NULL)
1873			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
1874		else
1875			LIST_INSERT_AFTER(wktail, wk, wk_list);
1876		wktail = wk;
1877	}
1878	FREE_LOCK(ump);
1879
1880	return (dirty);
1881}
1882
1883/*
1884 * Purge the work list of all items associated with a particular mount point.
1885 */
1886int
1887softdep_flushworklist(oldmnt, countp, td)
1888	struct mount *oldmnt;
1889	int *countp;
1890	struct thread *td;
1891{
1892	struct vnode *devvp;
1893	struct ufsmount *ump;
1894	int count, error;
1895
1896	/*
1897	 * Alternately flush the block device associated with the mount
1898	 * point and process any dependencies that the flushing
1899	 * creates. We continue until no more worklist dependencies
1900	 * are found.
1901	 */
1902	*countp = 0;
1903	error = 0;
1904	ump = VFSTOUFS(oldmnt);
1905	devvp = ump->um_devvp;
1906	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
1907		*countp += count;
1908		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1909		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1910		VOP_UNLOCK(devvp, 0);
1911		if (error != 0)
1912			break;
1913	}
1914	return (error);
1915}
1916
1917#define	SU_WAITIDLE_RETRIES	20
1918static int
1919softdep_waitidle(struct mount *mp, int flags __unused)
1920{
1921	struct ufsmount *ump;
1922	struct vnode *devvp;
1923	struct thread *td;
1924	int error, i;
1925
1926	ump = VFSTOUFS(mp);
1927	devvp = ump->um_devvp;
1928	td = curthread;
1929	error = 0;
1930	ACQUIRE_LOCK(ump);
1931	for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) {
1932		ump->softdep_req = 1;
1933		KASSERT((flags & FORCECLOSE) == 0 ||
1934		    ump->softdep_on_worklist == 0,
1935		    ("softdep_waitidle: work added after flush"));
1936		msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP,
1937		    "softdeps", 10 * hz);
1938		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1939		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1940		VOP_UNLOCK(devvp, 0);
1941		if (error != 0)
1942			break;
1943		ACQUIRE_LOCK(ump);
1944	}
1945	ump->softdep_req = 0;
1946	if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) {
1947		error = EBUSY;
1948		printf("softdep_waitidle: Failed to flush worklist for %p\n",
1949		    mp);
1950	}
1951	FREE_LOCK(ump);
1952	return (error);
1953}
1954
1955/*
1956 * Flush all vnodes and worklist items associated with a specified mount point.
1957 */
1958int
1959softdep_flushfiles(oldmnt, flags, td)
1960	struct mount *oldmnt;
1961	int flags;
1962	struct thread *td;
1963{
1964#ifdef QUOTA
1965	struct ufsmount *ump;
1966	int i;
1967#endif
1968	int error, early, depcount, loopcnt, retry_flush_count, retry;
1969	int morework;
1970
1971	KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0,
1972	    ("softdep_flushfiles called on non-softdep filesystem"));
1973	loopcnt = 10;
1974	retry_flush_count = 3;
1975retry_flush:
1976	error = 0;
1977
1978	/*
1979	 * Alternately flush the vnodes associated with the mount
1980	 * point and process any dependencies that the flushing
1981	 * creates. In theory, this loop can happen at most twice,
1982	 * but we give it a few extra just to be sure.
1983	 */
1984	for (; loopcnt > 0; loopcnt--) {
1985		/*
1986		 * Do another flush in case any vnodes were brought in
1987		 * as part of the cleanup operations.
1988		 */
1989		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
1990		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
1991		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
1992			break;
1993		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
1994		    depcount == 0)
1995			break;
1996	}
1997	/*
1998	 * If we are unmounting then it is an error to fail. If we
1999	 * are simply trying to downgrade to read-only, then filesystem
2000	 * activity can keep us busy forever, so we just fail with EBUSY.
2001	 */
2002	if (loopcnt == 0) {
2003		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
2004			panic("softdep_flushfiles: looping");
2005		error = EBUSY;
2006	}
2007	if (!error)
2008		error = softdep_waitidle(oldmnt, flags);
2009	if (!error) {
2010		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
2011			retry = 0;
2012			MNT_ILOCK(oldmnt);
2013			KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0,
2014			    ("softdep_flushfiles: !MNTK_NOINSMNTQ"));
2015			morework = oldmnt->mnt_nvnodelistsize > 0;
2016#ifdef QUOTA
2017			ump = VFSTOUFS(oldmnt);
2018			UFS_LOCK(ump);
2019			for (i = 0; i < MAXQUOTAS; i++) {
2020				if (ump->um_quotas[i] != NULLVP)
2021					morework = 1;
2022			}
2023			UFS_UNLOCK(ump);
2024#endif
2025			if (morework) {
2026				if (--retry_flush_count > 0) {
2027					retry = 1;
2028					loopcnt = 3;
2029				} else
2030					error = EBUSY;
2031			}
2032			MNT_IUNLOCK(oldmnt);
2033			if (retry)
2034				goto retry_flush;
2035		}
2036	}
2037	return (error);
2038}
2039
2040/*
2041 * Structure hashing.
2042 *
2043 * There are four types of structures that can be looked up:
2044 *	1) pagedep structures identified by mount point, inode number,
2045 *	   and logical block.
2046 *	2) inodedep structures identified by mount point and inode number.
2047 *	3) newblk structures identified by mount point and
2048 *	   physical block number.
2049 *	4) bmsafemap structures identified by mount point and
2050 *	   cylinder group number.
2051 *
2052 * The "pagedep" and "inodedep" dependency structures are hashed
2053 * separately from the file blocks and inodes to which they correspond.
2054 * This separation helps when the in-memory copy of an inode or
2055 * file block must be replaced. It also obviates the need to access
2056 * an inode or file page when simply updating (or de-allocating)
2057 * dependency structures. Lookup of newblk structures is needed to
2058 * find newly allocated blocks when trying to associate them with
2059 * their allocdirect or allocindir structure.
2060 *
2061 * The lookup routines optionally create and hash a new instance when
2062 * an existing entry is not found. The bmsafemap lookup routine always
2063 * allocates a new structure if an existing one is not found.
2064 */
2065#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
2066
2067/*
2068 * Structures and routines associated with pagedep caching.
2069 */
2070#define	PAGEDEP_HASH(ump, inum, lbn) \
2071	(&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size])
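
/*
 * Note that pagedep_hash_size (like the other *_hash_size fields used
 * below) is the mask returned by hashinit(9), i.e. one less than a
 * power-of-two table size, which is why these macros mask with it
 * rather than take a modulus.  For example, a 128-bucket table yields
 * a mask of 0x7f, so (inum + lbn) & 0x7f selects one of buckets 0..127.
 */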
2072
2073static int
2074pagedep_find(pagedephd, ino, lbn, pagedeppp)
2075	struct pagedep_hashhead *pagedephd;
2076	ino_t ino;
2077	ufs_lbn_t lbn;
2078	struct pagedep **pagedeppp;
2079{
2080	struct pagedep *pagedep;
2081
2082	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2083		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) {
2084			*pagedeppp = pagedep;
2085			return (1);
2086		}
2087	}
2088	*pagedeppp = NULL;
2089	return (0);
2090}
2091/*
2092 * Look up a pagedep. Return 1 if found, 0 otherwise.
2093 * If not found, allocate if DEPALLOC flag is passed.
2094 * Found or allocated entry is returned in pagedeppp.
2095 * This routine must be called with the per-filesystem softdep lock held.
2096 */
2097static int
2098pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp)
2099	struct mount *mp;
2100	struct buf *bp;
2101	ino_t ino;
2102	ufs_lbn_t lbn;
2103	int flags;
2104	struct pagedep **pagedeppp;
2105{
2106	struct pagedep *pagedep;
2107	struct pagedep_hashhead *pagedephd;
2108	struct worklist *wk;
2109	struct ufsmount *ump;
2110	int ret;
2111	int i;
2112
2113	ump = VFSTOUFS(mp);
2114	LOCK_OWNED(ump);
2115	if (bp) {
2116		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2117			if (wk->wk_type == D_PAGEDEP) {
2118				*pagedeppp = WK_PAGEDEP(wk);
2119				return (1);
2120			}
2121		}
2122	}
2123	pagedephd = PAGEDEP_HASH(ump, ino, lbn);
2124	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2125	if (ret) {
2126		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2127			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2128		return (1);
2129	}
2130	if ((flags & DEPALLOC) == 0)
2131		return (0);
2132	FREE_LOCK(ump);
2133	pagedep = malloc(sizeof(struct pagedep),
2134	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2135	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2136	ACQUIRE_LOCK(ump);
2137	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2138	if (*pagedeppp) {
2139		/*
2140		 * This should never happen since we only create pagedeps
2141		 * with the vnode lock held.  Could be an assert.
2142		 */
2143		WORKITEM_FREE(pagedep, D_PAGEDEP);
2144		return (ret);
2145	}
2146	pagedep->pd_ino = ino;
2147	pagedep->pd_lbn = lbn;
2148	LIST_INIT(&pagedep->pd_dirremhd);
2149	LIST_INIT(&pagedep->pd_pendinghd);
2150	for (i = 0; i < DAHASHSZ; i++)
2151		LIST_INIT(&pagedep->pd_diraddhd[i]);
2152	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2153	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2154	*pagedeppp = pagedep;
2155	return (0);
2156}
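
/*
 * pagedep_lookup() above, and inodedep_lookup() and newblk_lookup()
 * below, all use the same unlock/allocate/recheck pattern: malloc()
 * may sleep, so the softdep lock is dropped around the allocation and
 * the hash chain is searched a second time afterwards in case another
 * thread inserted the entry meanwhile, with the loser freeing its
 * copy.  A hypothetical sketch of the pattern (names are illustrative):
 */
#if 0
	/* Softdep lock held on entry. */
	if (hash_find(key, &ep))
		return (ep);
	FREE_LOCK(ump);
	new = malloc(sizeof(*new), M_TYPE, M_SOFTDEP_FLAGS);
	ACQUIRE_LOCK(ump);
	if (hash_find(key, &ep)) {	/* lost the race */
		WORKITEM_FREE(new, D_TYPE);
		return (ep);
	}
	hash_insert(key, new);
	return (new);
#endif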
2157
2158/*
2159 * Structures and routines associated with inodedep caching.
2160 */
2161#define	INODEDEP_HASH(ump, inum) \
2162      (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size])
2163
2164static int
2165inodedep_find(inodedephd, inum, inodedeppp)
2166	struct inodedep_hashhead *inodedephd;
2167	ino_t inum;
2168	struct inodedep **inodedeppp;
2169{
2170	struct inodedep *inodedep;
2171
2172	LIST_FOREACH(inodedep, inodedephd, id_hash)
2173		if (inum == inodedep->id_ino)
2174			break;
2175	if (inodedep) {
2176		*inodedeppp = inodedep;
2177		return (1);
2178	}
2179	*inodedeppp = NULL;
2180
2181	return (0);
2182}
2183/*
2184 * Look up an inodedep. Return 1 if found, 0 if not found.
2185 * If not found, allocate if DEPALLOC flag is passed.
2186 * Found or allocated entry is returned in inodedeppp.
2187 * This routine must be called with the per-filesystem softdep lock held.
2188 */
2189static int
2190inodedep_lookup(mp, inum, flags, inodedeppp)
2191	struct mount *mp;
2192	ino_t inum;
2193	int flags;
2194	struct inodedep **inodedeppp;
2195{
2196	struct inodedep *inodedep;
2197	struct inodedep_hashhead *inodedephd;
2198	struct ufsmount *ump;
2199	struct fs *fs;
2200
2201	ump = VFSTOUFS(mp);
2202	LOCK_OWNED(ump);
2203	fs = ump->um_fs;
2204	inodedephd = INODEDEP_HASH(ump, inum);
2205
2206	if (inodedep_find(inodedephd, inum, inodedeppp))
2207		return (1);
2208	if ((flags & DEPALLOC) == 0)
2209		return (0);
2210	/*
2211	 * If the system is over its limit and our filesystem is
2212	 * responsible for more than our share of that usage and
2213	 * we are not in a rush, request some inodedep cleanup.
2214	 */
2215	if (softdep_excess_inodes(ump))
2216		schedule_cleanup(mp);
2217	else
2218		FREE_LOCK(ump);
2219	inodedep = malloc(sizeof(struct inodedep),
2220		M_INODEDEP, M_SOFTDEP_FLAGS);
2221	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2222	ACQUIRE_LOCK(ump);
2223	if (inodedep_find(inodedephd, inum, inodedeppp)) {
2224		WORKITEM_FREE(inodedep, D_INODEDEP);
2225		return (1);
2226	}
2227	inodedep->id_fs = fs;
2228	inodedep->id_ino = inum;
2229	inodedep->id_state = ALLCOMPLETE;
2230	inodedep->id_nlinkdelta = 0;
2231	inodedep->id_savedino1 = NULL;
2232	inodedep->id_savedsize = -1;
2233	inodedep->id_savedextsize = -1;
2234	inodedep->id_savednlink = -1;
2235	inodedep->id_bmsafemap = NULL;
2236	inodedep->id_mkdiradd = NULL;
2237	LIST_INIT(&inodedep->id_dirremhd);
2238	LIST_INIT(&inodedep->id_pendinghd);
2239	LIST_INIT(&inodedep->id_inowait);
2240	LIST_INIT(&inodedep->id_bufwait);
2241	TAILQ_INIT(&inodedep->id_inoreflst);
2242	TAILQ_INIT(&inodedep->id_inoupdt);
2243	TAILQ_INIT(&inodedep->id_newinoupdt);
2244	TAILQ_INIT(&inodedep->id_extupdt);
2245	TAILQ_INIT(&inodedep->id_newextupdt);
2246	TAILQ_INIT(&inodedep->id_freeblklst);
2247	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2248	*inodedeppp = inodedep;
2249	return (0);
2250}
2251
2252/*
2253 * Structures and routines associated with newblk caching.
2254 */
2255#define	NEWBLK_HASH(ump, inum) \
2256	(&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size])
2257
2258static int
2259newblk_find(newblkhd, newblkno, flags, newblkpp)
2260	struct newblk_hashhead *newblkhd;
2261	ufs2_daddr_t newblkno;
2262	int flags;
2263	struct newblk **newblkpp;
2264{
2265	struct newblk *newblk;
2266
2267	LIST_FOREACH(newblk, newblkhd, nb_hash) {
2268		if (newblkno != newblk->nb_newblkno)
2269			continue;
2270		/*
2271		 * If we're creating a new dependency don't match those that
2272		 * have already been converted to allocdirects.  This is for
2273		 * a frag extend.
2274		 */
2275		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2276			continue;
2277		break;
2278	}
2279	if (newblk) {
2280		*newblkpp = newblk;
2281		return (1);
2282	}
2283	*newblkpp = NULL;
2284	return (0);
2285}
2286
2287/*
2288 * Look up a newblk. Return 1 if found, 0 if not found.
2289 * If not found, allocate if DEPALLOC flag is passed.
2290 * Found or allocated entry is returned in newblkpp.
2291 */
2292static int
2293newblk_lookup(mp, newblkno, flags, newblkpp)
2294	struct mount *mp;
2295	ufs2_daddr_t newblkno;
2296	int flags;
2297	struct newblk **newblkpp;
2298{
2299	struct newblk *newblk;
2300	struct newblk_hashhead *newblkhd;
2301	struct ufsmount *ump;
2302
2303	ump = VFSTOUFS(mp);
2304	LOCK_OWNED(ump);
2305	newblkhd = NEWBLK_HASH(ump, newblkno);
2306	if (newblk_find(newblkhd, newblkno, flags, newblkpp))
2307		return (1);
2308	if ((flags & DEPALLOC) == 0)
2309		return (0);
2310	FREE_LOCK(ump);
2311	newblk = malloc(sizeof(union allblk), M_NEWBLK,
2312	    M_SOFTDEP_FLAGS | M_ZERO);
2313	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2314	ACQUIRE_LOCK(ump);
2315	if (newblk_find(newblkhd, newblkno, flags, newblkpp)) {
2316		WORKITEM_FREE(newblk, D_NEWBLK);
2317		return (1);
2318	}
2319	newblk->nb_freefrag = NULL;
2320	LIST_INIT(&newblk->nb_indirdeps);
2321	LIST_INIT(&newblk->nb_newdirblk);
2322	LIST_INIT(&newblk->nb_jwork);
2323	newblk->nb_state = ATTACHED;
2324	newblk->nb_newblkno = newblkno;
2325	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2326	*newblkpp = newblk;
2327	return (0);
2328}
2329
2330/*
2331 * Structures and routines associated with freed indirect block caching.
2332 */
2333#define	INDIR_HASH(ump, blkno) \
2334	(&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size])
2335
2336/*
2337 * Look up an indirect block in the indir hash table.  The freework is
2338 * removed and potentially freed.  The caller must do a blocking journal
2339 * write before writing to the blkno.
2340 */
2341static int
2342indirblk_lookup(mp, blkno)
2343	struct mount *mp;
2344	ufs2_daddr_t blkno;
2345{
2346	struct freework *freework;
2347	struct indir_hashhead *wkhd;
2348	struct ufsmount *ump;
2349
2350	ump = VFSTOUFS(mp);
2351	wkhd = INDIR_HASH(ump, blkno);
2352	TAILQ_FOREACH(freework, wkhd, fw_next) {
2353		if (freework->fw_blkno != blkno)
2354			continue;
2355		indirblk_remove(freework);
2356		return (1);
2357	}
2358	return (0);
2359}
2360
2361/*
2362 * Insert an indirect block represented by freework into the indirblk
2363 * hash table so that it may prevent the block from being re-used prior
2364 * to the journal being written.
2365 */
2366static void
2367indirblk_insert(freework)
2368	struct freework *freework;
2369{
2370	struct jblocks *jblocks;
2371	struct jseg *jseg;
2372	struct ufsmount *ump;
2373
2374	ump = VFSTOUFS(freework->fw_list.wk_mp);
2375	jblocks = ump->softdep_jblocks;
2376	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2377	if (jseg == NULL)
2378		return;
2379
2380	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2381	TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework,
2382	    fw_next);
2383	freework->fw_state &= ~DEPCOMPLETE;
2384}
2385
2386static void
2387indirblk_remove(freework)
2388	struct freework *freework;
2389{
2390	struct ufsmount *ump;
2391
2392	ump = VFSTOUFS(freework->fw_list.wk_mp);
2393	LIST_REMOVE(freework, fw_segs);
2394	TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next);
2395	freework->fw_state |= DEPCOMPLETE;
2396	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2397		WORKITEM_FREE(freework, D_FREEWORK);
2398}
2399
2400/*
2401 * Executed during filesystem system initialization before
2402 * Executed during system initialization before
2403 */
2404void
2405softdep_initialize()
2406{
2407
2408	TAILQ_INIT(&softdepmounts);
2409	max_softdeps = desiredvnodes * 4;
2410
2411	/* initialize bioops hack */
2412	bioops.io_start = softdep_disk_io_initiation;
2413	bioops.io_complete = softdep_disk_write_complete;
2414	bioops.io_deallocate = softdep_deallocate_dependencies;
2415	bioops.io_countdeps = softdep_count_dependencies;
2416	softdep_ast_cleanup = softdep_ast_cleanup_proc;
2417
2418	/* Initialize the callout with an mtx. */
2419	callout_init_mtx(&softdep_callout, &lk, 0);
2420}
2421
2422/*
2423 * Executed after all filesystems have been unmounted during
2424 * filesystem module unload.
2425 */
2426void
2427softdep_uninitialize()
2428{
2429
2430	/* clear bioops hack */
2431	bioops.io_start = NULL;
2432	bioops.io_complete = NULL;
2433	bioops.io_deallocate = NULL;
2434	bioops.io_countdeps = NULL;
2435	softdep_ast_cleanup = NULL;
2436
2437	callout_drain(&softdep_callout);
2438}
2439
2440/*
2441 * Called at mount time to notify the dependency code that a
2442 * filesystem wishes to use it.
2443 */
2444int
2445softdep_mount(devvp, mp, fs, cred)
2446	struct vnode *devvp;
2447	struct mount *mp;
2448	struct fs *fs;
2449	struct ucred *cred;
2450{
2451	struct csum_total cstotal;
2452	struct mount_softdeps *sdp;
2453	struct ufsmount *ump;
2454	struct cg *cgp;
2455	struct buf *bp;
2456	int i, error, cyl;
2457
2458	sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA,
2459	    M_WAITOK | M_ZERO);
2460	MNT_ILOCK(mp);
2461	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2462	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2463		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2464			MNTK_SOFTDEP | MNTK_NOASYNC;
2465	}
2466	ump = VFSTOUFS(mp);
2467	ump->um_softdep = sdp;
2468	MNT_IUNLOCK(mp);
2469	rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock");
2470	sdp->sd_ump = ump;
2471	LIST_INIT(&ump->softdep_workitem_pending);
2472	LIST_INIT(&ump->softdep_journal_pending);
2473	TAILQ_INIT(&ump->softdep_unlinked);
2474	LIST_INIT(&ump->softdep_dirtycg);
2475	ump->softdep_worklist_tail = NULL;
2476	ump->softdep_on_worklist = 0;
2477	ump->softdep_deps = 0;
2478	LIST_INIT(&ump->softdep_mkdirlisthd);
2479	ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
2480	    &ump->pagedep_hash_size);
2481	ump->pagedep_nextclean = 0;
2482	ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP,
2483	    &ump->inodedep_hash_size);
2484	ump->inodedep_nextclean = 0;
2485	ump->newblk_hashtbl = hashinit(max_softdeps / 2,  M_NEWBLK,
2486	    &ump->newblk_hash_size);
2487	ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP,
2488	    &ump->bmsafemap_hash_size);
2489	i = 1 << (ffs(desiredvnodes / 10) - 1);
2490	ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead),
2491	    M_FREEWORK, M_WAITOK);
2492	ump->indir_hash_size = i - 1;
2493	for (i = 0; i <= ump->indir_hash_size; i++)
2494		TAILQ_INIT(&ump->indir_hashtbl[i]);
2495	ACQUIRE_GBLLOCK(&lk);
2496	TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
2497	FREE_GBLLOCK(&lk);
2498	if ((fs->fs_flags & FS_SUJ) &&
2499	    (error = journal_mount(mp, fs, cred)) != 0) {
2500		printf("Failed to start journal: %d\n", error);
2501		softdep_unmount(mp);
2502		return (error);
2503	}
2504	/*
2505	 * Start our flushing thread in the bufdaemon process.
2506	 */
2507	ACQUIRE_LOCK(ump);
2508	ump->softdep_flags |= FLUSH_STARTING;
2509	FREE_LOCK(ump);
2510	kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc,
2511	    &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker",
2512	    mp->mnt_stat.f_mntonname);
2513	ACQUIRE_LOCK(ump);
2514	while ((ump->softdep_flags & FLUSH_STARTING) != 0) {
2515		msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart",
2516		    hz / 2);
2517	}
2518	FREE_LOCK(ump);
2519	/*
2520	 * When doing soft updates, the counters in the
2521	 * superblock may have gotten out of sync. Recomputation
2522	 * can take a long time and can be deferred for background
2523	 * fsck.  However, the old behavior of scanning the cylinder
2524	 * groups and recalculating them at mount time is available
2525	 * by setting vfs.ffs.compute_summary_at_mount to one.
2526	 */
2527	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2528		return (0);
2529	bzero(&cstotal, sizeof cstotal);
2530	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2531		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2532		    fs->fs_cgsize, cred, &bp)) != 0) {
2533			brelse(bp);
2534			softdep_unmount(mp);
2535			return (error);
2536		}
2537		cgp = (struct cg *)bp->b_data;
2538		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2539		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2540		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2541		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2542		fs->fs_cs(fs, cyl) = cgp->cg_cs;
2543		brelse(bp);
2544	}
2545#ifdef DEBUG
2546	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2547		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2548#endif
2549	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2550	return (0);
2551}
2552
2553void
2554softdep_unmount(mp)
2555	struct mount *mp;
2556{
2557	struct ufsmount *ump;
2558#ifdef INVARIANTS
2559	int i;
2560#endif
2561
2562	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
2563	    ("softdep_unmount called on non-softdep filesystem"));
2564	ump = VFSTOUFS(mp);
2565	MNT_ILOCK(mp);
2566	mp->mnt_flag &= ~MNT_SOFTDEP;
2567	if (MOUNTEDSUJ(mp) == 0) {
2568		MNT_IUNLOCK(mp);
2569	} else {
2570		mp->mnt_flag &= ~MNT_SUJ;
2571		MNT_IUNLOCK(mp);
2572		journal_unmount(ump);
2573	}
2574	/*
2575	 * Shut down our flushing thread.  Check for NULL in case
2576	 * softdep_mount errored out before the thread was created.
2577	 */
2578	if (ump->softdep_flushtd != NULL) {
2579		ACQUIRE_LOCK(ump);
2580		ump->softdep_flags |= FLUSH_EXIT;
2581		wakeup(&ump->softdep_flushtd);
2582		msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP,
2583		    "sdwait", 0);
2584		KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0,
2585		    ("Thread shutdown failed"));
2586	}
2587	/*
2588	 * Free up our resources.
2589	 */
2590	ACQUIRE_GBLLOCK(&lk);
2591	TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next);
2592	FREE_GBLLOCK(&lk);
2593	rw_destroy(LOCK_PTR(ump));
2594	hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size);
2595	hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size);
2596	hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size);
2597	hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP,
2598	    ump->bmsafemap_hash_size);
2599	free(ump->indir_hashtbl, M_FREEWORK);
2600#ifdef INVARIANTS
2601	for (i = 0; i <= D_LAST; i++)
2602		KASSERT(ump->softdep_curdeps[i] == 0,
2603		    ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt,
2604		    TYPENAME(i), ump->softdep_curdeps[i]));
2605#endif
2606	free(ump->um_softdep, M_MOUNTDATA);
2607}
2608
2609static struct jblocks *
2610jblocks_create(void)
2611{
2612	struct jblocks *jblocks;
2613
2614	jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2615	TAILQ_INIT(&jblocks->jb_segs);
2616	jblocks->jb_avail = 10;
2617	jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2618	    M_JBLOCKS, M_WAITOK | M_ZERO);
2619
2620	return (jblocks);
2621}
2622
2623static ufs2_daddr_t
2624jblocks_alloc(jblocks, bytes, actual)
2625	struct jblocks *jblocks;
2626	int bytes;
2627	int *actual;
2628{
2629	ufs2_daddr_t daddr;
2630	struct jextent *jext;
2631	int freecnt;
2632	int blocks;
2633
2634	blocks = bytes / DEV_BSIZE;
2635	jext = &jblocks->jb_extent[jblocks->jb_head];
2636	freecnt = jext->je_blocks - jblocks->jb_off;
2637	if (freecnt == 0) {
2638		jblocks->jb_off = 0;
2639		if (++jblocks->jb_head > jblocks->jb_used)
2640			jblocks->jb_head = 0;
2641		jext = &jblocks->jb_extent[jblocks->jb_head];
2642		freecnt = jext->je_blocks;
2643	}
2644	if (freecnt > blocks)
2645		freecnt = blocks;
2646	*actual = freecnt * DEV_BSIZE;
2647	daddr = jext->je_daddr + jblocks->jb_off;
2648	jblocks->jb_off += freecnt;
2649	jblocks->jb_free -= freecnt;
2650
2651	return (daddr);
2652}
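
/*
 * A worked example of jblocks_alloc(), assuming DEV_BSIZE of 512: with
 * jb_head pointing at a 100-block extent and jb_off at 96, a request
 * for 8192 bytes (16 blocks) finds only freecnt = 4 blocks left, so
 * *actual is truncated to 4 * 512 = 2048 bytes.  Callers must cope
 * with the short allocation; see the size handling in
 * softdep_process_journal().
 */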
2653
2654static void
2655jblocks_free(jblocks, mp, bytes)
2656	struct jblocks *jblocks;
2657	struct mount *mp;
2658	int bytes;
2659{
2660
2661	LOCK_OWNED(VFSTOUFS(mp));
2662	jblocks->jb_free += bytes / DEV_BSIZE;
2663	if (jblocks->jb_suspended)
2664		worklist_speedup(mp);
2665	wakeup(jblocks);
2666}
2667
2668static void
2669jblocks_destroy(jblocks)
2670	struct jblocks *jblocks;
2671{
2672
2673	if (jblocks->jb_extent)
2674		free(jblocks->jb_extent, M_JBLOCKS);
2675	free(jblocks, M_JBLOCKS);
2676}
2677
2678static void
2679jblocks_add(jblocks, daddr, blocks)
2680	struct jblocks *jblocks;
2681	ufs2_daddr_t daddr;
2682	int blocks;
2683{
2684	struct jextent *jext;
2685
2686	jblocks->jb_blocks += blocks;
2687	jblocks->jb_free += blocks;
2688	jext = &jblocks->jb_extent[jblocks->jb_used];
2689	/* Adding the first block. */
2690	if (jext->je_daddr == 0) {
2691		jext->je_daddr = daddr;
2692		jext->je_blocks = blocks;
2693		return;
2694	}
2695	/* Extending the last extent. */
2696	if (jext->je_daddr + jext->je_blocks == daddr) {
2697		jext->je_blocks += blocks;
2698		return;
2699	}
2700	/* Adding a new extent. */
2701	if (++jblocks->jb_used == jblocks->jb_avail) {
2702		jblocks->jb_avail *= 2;
2703		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2704		    M_JBLOCKS, M_WAITOK | M_ZERO);
2705		memcpy(jext, jblocks->jb_extent,
2706		    sizeof(struct jextent) * jblocks->jb_used);
2707		free(jblocks->jb_extent, M_JBLOCKS);
2708		jblocks->jb_extent = jext;
2709	}
2710	jext = &jblocks->jb_extent[jblocks->jb_used];
2711	jext->je_daddr = daddr;
2712	jext->je_blocks = blocks;
2713	return;
2714}
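
/*
 * jblocks_add() coalesces contiguous runs into a single extent.  For
 * example, adding (daddr 1000, 16 blocks) and then (daddr 1016, 16
 * blocks) leaves one 32-block extent at 1000, while a subsequent
 * (daddr 2048, 16 blocks) starts a new extent, doubling the
 * jb_extent[] array first if it is full.
 */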
2715
2716int
2717softdep_journal_lookup(mp, vpp)
2718	struct mount *mp;
2719	struct vnode **vpp;
2720{
2721	struct componentname cnp;
2722	struct vnode *dvp;
2723	ino_t sujournal;
2724	int error;
2725
2726	error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp);
2727	if (error)
2728		return (error);
2729	bzero(&cnp, sizeof(cnp));
2730	cnp.cn_nameiop = LOOKUP;
2731	cnp.cn_flags = ISLASTCN;
2732	cnp.cn_thread = curthread;
2733	cnp.cn_cred = curthread->td_ucred;
2734	cnp.cn_pnbuf = SUJ_FILE;
2735	cnp.cn_nameptr = SUJ_FILE;
2736	cnp.cn_namelen = strlen(SUJ_FILE);
2737	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2738	vput(dvp);
2739	if (error != 0)
2740		return (error);
2741	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2742	return (error);
2743}
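
/*
 * SUJ_FILE here is the journal inode in the filesystem root
 * (".sujournal").  It is normally created by running
 * "tunefs -j enable <filesystem>" on a filesystem that is not mounted
 * read-write, which is what the "Use tunefs to create one" message in
 * journal_mount() below refers to.
 */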
2744
2745/*
2746 * Open and verify the journal file.
2747 */
2748static int
2749journal_mount(mp, fs, cred)
2750	struct mount *mp;
2751	struct fs *fs;
2752	struct ucred *cred;
2753{
2754	struct jblocks *jblocks;
2755	struct ufsmount *ump;
2756	struct vnode *vp;
2757	struct inode *ip;
2758	ufs2_daddr_t blkno;
2759	int bcount;
2760	int error;
2761	int i;
2762
2763	ump = VFSTOUFS(mp);
2764	ump->softdep_journal_tail = NULL;
2765	ump->softdep_on_journal = 0;
2766	ump->softdep_accdeps = 0;
2767	ump->softdep_req = 0;
2768	ump->softdep_jblocks = NULL;
2769	error = softdep_journal_lookup(mp, &vp);
2770	if (error != 0) {
2771		printf("Failed to find journal.  Use tunefs to create one\n");
2772		return (error);
2773	}
2774	ip = VTOI(vp);
2775	if (ip->i_size < SUJ_MIN) {
2776		error = ENOSPC;
2777		goto out;
2778	}
2779	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
2780	jblocks = jblocks_create();
2781	for (i = 0; i < bcount; i++) {
2782		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2783		if (error)
2784			break;
2785		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2786	}
2787	if (error) {
2788		jblocks_destroy(jblocks);
2789		goto out;
2790	}
2791	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
2792	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2793	ump->softdep_jblocks = jblocks;
2794out:
2795	if (error == 0) {
2796		MNT_ILOCK(mp);
2797		mp->mnt_flag |= MNT_SUJ;
2798		mp->mnt_flag &= ~MNT_SOFTDEP;
2799		MNT_IUNLOCK(mp);
2800		/*
2801		 * Only validate the journal contents if the
2802		 * filesystem is clean, otherwise we write the logs
2803		 * but they'll never be used.  If the filesystem was
2804		 * still dirty when we mounted it the journal is
2805		 * invalid and a new journal can only be valid if it
2806		 * starts from a clean mount.
2807		 */
2808		if (fs->fs_clean) {
2809			DIP_SET(ip, i_modrev, fs->fs_mtime);
2810			ip->i_flags |= IN_MODIFIED;
2811			ffs_update(vp, 1);
2812		}
2813	}
2814	vput(vp);
2815	return (error);
2816}
2817
2818static void
2819journal_unmount(ump)
2820	struct ufsmount *ump;
2821{
2822
2823	if (ump->softdep_jblocks)
2824		jblocks_destroy(ump->softdep_jblocks);
2825	ump->softdep_jblocks = NULL;
2826}
2827
2828/*
2829 * Called when a journal record is ready to be written.  Space is allocated
2830 * and the journal entry is created when the journal is flushed to stable
2831 * store.
2832 */
2833static void
2834add_to_journal(wk)
2835	struct worklist *wk;
2836{
2837	struct ufsmount *ump;
2838
2839	ump = VFSTOUFS(wk->wk_mp);
2840	LOCK_OWNED(ump);
2841	if (wk->wk_state & ONWORKLIST)
2842		panic("add_to_journal: %s(0x%X) already on list",
2843		    TYPENAME(wk->wk_type), wk->wk_state);
2844	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2845	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2846		ump->softdep_jblocks->jb_age = ticks;
2847		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2848	} else
2849		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2850	ump->softdep_journal_tail = wk;
2851	ump->softdep_on_journal += 1;
2852}
2853
2854/*
2855 * Remove an arbitrary item from the journal worklist, maintaining the tail
2856 * pointer.  This happens when a new operation obviates the need to
2857 * journal an old operation.
2858 */
2859static void
2860remove_from_journal(wk)
2861	struct worklist *wk;
2862{
2863	struct ufsmount *ump;
2864
2865	ump = VFSTOUFS(wk->wk_mp);
2866	LOCK_OWNED(ump);
2867#ifdef SUJ_DEBUG
2868	{
2869		struct worklist *wkn;
2870
2871		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2872			if (wkn == wk)
2873				break;
2874		if (wkn == NULL)
2875			panic("remove_from_journal: %p is not in journal", wk);
2876	}
2877#endif
2878	/*
2879	 * We emulate a TAILQ to save space in most structures which do not
2880	 * require TAILQ semantics.  Here we must update the tail position
2881	 * when removing the tail which is not the final entry. This works
2882	 * when removing the entry that is currently the tail.  This works
2883	 * only if the worklist linkage is at the beginning of the structure.
2884	if (ump->softdep_journal_tail == wk)
2885		ump->softdep_journal_tail =
2886		    (struct worklist *)wk->wk_list.le_prev;
2887
2888	WORKLIST_REMOVE(wk);
2889	ump->softdep_on_journal -= 1;
2890}
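
/*
 * The (struct worklist *)wk->wk_list.le_prev cast above, and the
 * matching sentinel cast in process_worklist_item(), depend on LIST
 * internals: le_prev points at the previous element's le_next field,
 * so when the worklist linkage is the first member of a structure the
 * address of that field is also the address of the previous element.
 * That is what the "emulate a TAILQ" comment relies on.
 */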
2891
2892/*
2893 * Check for journal space as well as dependency limits so the prelink
2894 * code can throttle both journaled and non-journaled filesystems.
2895 * Threshold is 0 for low and 1 for min.
2896 */
2897static int
2898journal_space(ump, thresh)
2899	struct ufsmount *ump;
2900	int thresh;
2901{
2902	struct jblocks *jblocks;
2903	int limit, avail;
2904
2905	jblocks = ump->softdep_jblocks;
2906	if (jblocks == NULL)
2907		return (1);
2908	/*
2909	 * We use a tighter restriction here to prevent request_cleanup(),
2910	 * running in other threads, from blocking on locks we currently hold.
2911	 * We have to be over the limit and our filesystem has to be
2912	 * responsible for more than our share of that usage.
2913	 */
2914	limit = (max_softdeps / 10) * 9;
2915	if (dep_current[D_INODEDEP] > limit &&
2916	    ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
2917		return (0);
2918	if (thresh)
2919		thresh = jblocks->jb_min;
2920	else
2921		thresh = jblocks->jb_low;
2922	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
2923	avail = jblocks->jb_free - avail;
2924
2925	return (avail > thresh);
2926}
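
/*
 * A worked example of the check above, assuming JREC_SIZE of 32 bytes
 * and DEV_BSIZE of 512: with 4096 records pending, (4096 * 32) / 512 =
 * 256 blocks are already committed to unwritten records, so a journal
 * with jb_free = 2048 has avail = 1792 blocks to compare against
 * jb_low (thresh 0) or jb_min (thresh 1).
 */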
2927
2928static void
2929journal_suspend(ump)
2930	struct ufsmount *ump;
2931{
2932	struct jblocks *jblocks;
2933	struct mount *mp;
2934
2935	mp = UFSTOVFS(ump);
2936	jblocks = ump->softdep_jblocks;
2937	MNT_ILOCK(mp);
2938	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2939		stat_journal_min++;
2940		mp->mnt_kern_flag |= MNTK_SUSPEND;
2941		mp->mnt_susp_owner = ump->softdep_flushtd;
2942	}
2943	jblocks->jb_suspended = 1;
2944	MNT_IUNLOCK(mp);
2945}
2946
2947static int
2948journal_unsuspend(struct ufsmount *ump)
2949{
2950	struct jblocks *jblocks;
2951	struct mount *mp;
2952
2953	mp = UFSTOVFS(ump);
2954	jblocks = ump->softdep_jblocks;
2955
2956	if (jblocks != NULL && jblocks->jb_suspended &&
2957	    journal_space(ump, jblocks->jb_min)) {
2958		jblocks->jb_suspended = 0;
2959		FREE_LOCK(ump);
2960		mp->mnt_susp_owner = curthread;
2961		vfs_write_resume(mp, 0);
2962		ACQUIRE_LOCK(ump);
2963		return (1);
2964	}
2965	return (0);
2966}
2967
2968/*
2969 * Called before any allocation function to be certain that there is
2970 * sufficient space in the journal prior to creating any new records.
2971 * Since in the case of block allocation we may have multiple locked
2972 * buffers at the time of the actual allocation we can not block
2973 * when the journal records are created.  Doing so would create a deadlock
2974 * if any of these buffers needed to be flushed to reclaim space.  Instead
2975 * we require a sufficiently large amount of available space such that
2976 * each thread in the system could have passed this allocation check and
2977 * still have sufficient free space.  With 20% of a minimum journal size
2978 * of 1MB we have 6553 records available.
2979 */
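/*
 * The arithmetic behind the figure above, assuming JREC_SIZE of 32
 * bytes: 20% of a 1MB journal is 0.20 * 1048576 = 209715 bytes, and
 * 209715 / 32 = 6553 records.
 */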
2980int
2981softdep_prealloc(vp, waitok)
2982	struct vnode *vp;
2983	int waitok;
2984{
2985	struct ufsmount *ump;
2986
2987	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
2988	    ("softdep_prealloc called on non-softdep filesystem"));
2989	/*
2990	 * Nothing to do if we are not running journaled soft updates.
2991	 * If we currently hold the snapshot lock, we must avoid handling
2992	 * other resources that could cause deadlock.
2993	 */
2994	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)))
2995		return (0);
2996	ump = VFSTOUFS(vp->v_mount);
2997	ACQUIRE_LOCK(ump);
2998	if (journal_space(ump, 0)) {
2999		FREE_LOCK(ump);
3000		return (0);
3001	}
3002	stat_journal_low++;
3003	FREE_LOCK(ump);
3004	if (waitok == MNT_NOWAIT)
3005		return (ENOSPC);
3006	/*
3007	 * Attempt to sync this vnode once to flush any journal
3008	 * work attached to it.
3009	 */
3010	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
3011		ffs_syncvnode(vp, waitok, 0);
3012	ACQUIRE_LOCK(ump);
3013	process_removes(vp);
3014	process_truncates(vp);
3015	if (journal_space(ump, 0) == 0) {
3016		softdep_speedup(ump);
3017		if (journal_space(ump, 1) == 0)
3018			journal_suspend(ump);
3019	}
3020	FREE_LOCK(ump);
3021
3022	return (0);
3023}
3024
3025/*
3026 * Before adjusting a link count on a vnode verify that we have sufficient
3027 * journal space.  If not, process operations that depend on the currently
3028 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
3029 * and softdep flush threads can not acquire these locks to reclaim space.
3030 */
3031static void
3032softdep_prelink(dvp, vp)
3033	struct vnode *dvp;
3034	struct vnode *vp;
3035{
3036	struct ufsmount *ump;
3037
3038	ump = VFSTOUFS(dvp->v_mount);
3039	LOCK_OWNED(ump);
3040	/*
3041	 * Nothing to do if we have sufficient journal space.
3042	 * If we currently hold the snapshot lock, we must avoid
3043	 * handling other resources that could cause deadlock.
3044	 */
3045	if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
3046		return;
3047	stat_journal_low++;
3048	FREE_LOCK(ump);
3049	if (vp)
3050		ffs_syncvnode(vp, MNT_NOWAIT, 0);
3051	ffs_syncvnode(dvp, MNT_WAIT, 0);
3052	ACQUIRE_LOCK(ump);
3053	/* Process vp before dvp as it may create .. removes. */
3054	if (vp) {
3055		process_removes(vp);
3056		process_truncates(vp);
3057	}
3058	process_removes(dvp);
3059	process_truncates(dvp);
3060	softdep_speedup(ump);
3061	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3062	if (journal_space(ump, 0) == 0) {
3063		softdep_speedup(ump);
3064		if (journal_space(ump, 1) == 0)
3065			journal_suspend(ump);
3066	}
3067}
3068
3069static void
3070jseg_write(ump, jseg, data)
3071	struct ufsmount *ump;
3072	struct jseg *jseg;
3073	uint8_t *data;
3074{
3075	struct jsegrec *rec;
3076
3077	rec = (struct jsegrec *)data;
3078	rec->jsr_seq = jseg->js_seq;
3079	rec->jsr_oldest = jseg->js_oldseq;
3080	rec->jsr_cnt = jseg->js_cnt;
3081	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
3082	rec->jsr_crc = 0;
3083	rec->jsr_time = ump->um_fs->fs_mtime;
3084}
3085
3086static inline void
3087inoref_write(inoref, jseg, rec)
3088	struct inoref *inoref;
3089	struct jseg *jseg;
3090	struct jrefrec *rec;
3091{
3092
3093	inoref->if_jsegdep->jd_seg = jseg;
3094	rec->jr_ino = inoref->if_ino;
3095	rec->jr_parent = inoref->if_parent;
3096	rec->jr_nlink = inoref->if_nlink;
3097	rec->jr_mode = inoref->if_mode;
3098	rec->jr_diroff = inoref->if_diroff;
3099}
3100
3101static void
3102jaddref_write(jaddref, jseg, data)
3103	struct jaddref *jaddref;
3104	struct jseg *jseg;
3105	uint8_t *data;
3106{
3107	struct jrefrec *rec;
3108
3109	rec = (struct jrefrec *)data;
3110	rec->jr_op = JOP_ADDREF;
3111	inoref_write(&jaddref->ja_ref, jseg, rec);
3112}
3113
3114static void
3115jremref_write(jremref, jseg, data)
3116	struct jremref *jremref;
3117	struct jseg *jseg;
3118	uint8_t *data;
3119{
3120	struct jrefrec *rec;
3121
3122	rec = (struct jrefrec *)data;
3123	rec->jr_op = JOP_REMREF;
3124	inoref_write(&jremref->jr_ref, jseg, rec);
3125}
3126
3127static void
3128jmvref_write(jmvref, jseg, data)
3129	struct jmvref *jmvref;
3130	struct jseg *jseg;
3131	uint8_t *data;
3132{
3133	struct jmvrec *rec;
3134
3135	rec = (struct jmvrec *)data;
3136	rec->jm_op = JOP_MVREF;
3137	rec->jm_ino = jmvref->jm_ino;
3138	rec->jm_parent = jmvref->jm_parent;
3139	rec->jm_oldoff = jmvref->jm_oldoff;
3140	rec->jm_newoff = jmvref->jm_newoff;
3141}
3142
3143static void
3144jnewblk_write(jnewblk, jseg, data)
3145	struct jnewblk *jnewblk;
3146	struct jseg *jseg;
3147	uint8_t *data;
3148{
3149	struct jblkrec *rec;
3150
3151	jnewblk->jn_jsegdep->jd_seg = jseg;
3152	rec = (struct jblkrec *)data;
3153	rec->jb_op = JOP_NEWBLK;
3154	rec->jb_ino = jnewblk->jn_ino;
3155	rec->jb_blkno = jnewblk->jn_blkno;
3156	rec->jb_lbn = jnewblk->jn_lbn;
3157	rec->jb_frags = jnewblk->jn_frags;
3158	rec->jb_oldfrags = jnewblk->jn_oldfrags;
3159}
3160
3161static void
3162jfreeblk_write(jfreeblk, jseg, data)
3163	struct jfreeblk *jfreeblk;
3164	struct jseg *jseg;
3165	uint8_t *data;
3166{
3167	struct jblkrec *rec;
3168
3169	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3170	rec = (struct jblkrec *)data;
3171	rec->jb_op = JOP_FREEBLK;
3172	rec->jb_ino = jfreeblk->jf_ino;
3173	rec->jb_blkno = jfreeblk->jf_blkno;
3174	rec->jb_lbn = jfreeblk->jf_lbn;
3175	rec->jb_frags = jfreeblk->jf_frags;
3176	rec->jb_oldfrags = 0;
3177}
3178
3179static void
3180jfreefrag_write(jfreefrag, jseg, data)
3181	struct jfreefrag *jfreefrag;
3182	struct jseg *jseg;
3183	uint8_t *data;
3184{
3185	struct jblkrec *rec;
3186
3187	jfreefrag->fr_jsegdep->jd_seg = jseg;
3188	rec = (struct jblkrec *)data;
3189	rec->jb_op = JOP_FREEBLK;
3190	rec->jb_ino = jfreefrag->fr_ino;
3191	rec->jb_blkno = jfreefrag->fr_blkno;
3192	rec->jb_lbn = jfreefrag->fr_lbn;
3193	rec->jb_frags = jfreefrag->fr_frags;
3194	rec->jb_oldfrags = 0;
3195}
3196
3197static void
3198jtrunc_write(jtrunc, jseg, data)
3199	struct jtrunc *jtrunc;
3200	struct jseg *jseg;
3201	uint8_t *data;
3202{
3203	struct jtrncrec *rec;
3204
3205	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3206	rec = (struct jtrncrec *)data;
3207	rec->jt_op = JOP_TRUNC;
3208	rec->jt_ino = jtrunc->jt_ino;
3209	rec->jt_size = jtrunc->jt_size;
3210	rec->jt_extsize = jtrunc->jt_extsize;
3211}
3212
3213static void
3214jfsync_write(jfsync, jseg, data)
3215	struct jfsync *jfsync;
3216	struct jseg *jseg;
3217	uint8_t *data;
3218{
3219	struct jtrncrec *rec;
3220
3221	rec = (struct jtrncrec *)data;
3222	rec->jt_op = JOP_SYNC;
3223	rec->jt_ino = jfsync->jfs_ino;
3224	rec->jt_size = jfsync->jfs_size;
3225	rec->jt_extsize = jfsync->jfs_extsize;
3226}
3227
3228static void
3229softdep_flushjournal(mp)
3230	struct mount *mp;
3231{
3232	struct jblocks *jblocks;
3233	struct ufsmount *ump;
3234
3235	if (MOUNTEDSUJ(mp) == 0)
3236		return;
3237	ump = VFSTOUFS(mp);
3238	jblocks = ump->softdep_jblocks;
3239	ACQUIRE_LOCK(ump);
3240	while (ump->softdep_on_journal) {
3241		jblocks->jb_needseg = 1;
3242		softdep_process_journal(mp, NULL, MNT_WAIT);
3243	}
3244	FREE_LOCK(ump);
3245}
3246
3247static void softdep_synchronize_completed(struct bio *);
3248static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3249
3250static void
3251softdep_synchronize_completed(bp)
3252        struct bio *bp;
3253{
3254	struct jseg *oldest;
3255	struct jseg *jseg;
3256	struct ufsmount *ump;
3257
3258	/*
3259	 * caller1 marks the last segment written before we issued the
3260	 * synchronize cache.
3261	 */
3262	jseg = bp->bio_caller1;
3263	if (jseg == NULL) {
3264		g_destroy_bio(bp);
3265		return;
3266	}
3267	ump = VFSTOUFS(jseg->js_list.wk_mp);
3268	ACQUIRE_LOCK(ump);
3269	oldest = NULL;
3270	/*
3271	 * Mark all the journal entries waiting on the synchronize cache
3272	 * as completed so they may continue on.
3273	 */
3274	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3275		jseg->js_state |= COMPLETE;
3276		oldest = jseg;
3277		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3278	}
3279	/*
3280	 * Restart deferred journal entry processing from the oldest
3281	 * completed jseg.
3282	 */
3283	if (oldest)
3284		complete_jsegs(oldest);
3285
3286	FREE_LOCK(ump);
3287	g_destroy_bio(bp);
3288}
3289
3290/*
3291 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3292 * barriers.  The journal must be written prior to any blocks that depend
3293 * on it and the journal can not be released until the blocks have been
3294 * written.  This code handles both barriers simultaneously.
3295 */
3296static void
3297softdep_synchronize(bp, ump, caller1)
3298	struct bio *bp;
3299	struct ufsmount *ump;
3300	void *caller1;
3301{
3302
3303	bp->bio_cmd = BIO_FLUSH;
3304	bp->bio_flags |= BIO_ORDERED;
3305	bp->bio_data = NULL;
3306	bp->bio_offset = ump->um_cp->provider->mediasize;
3307	bp->bio_length = 0;
3308	bp->bio_done = softdep_synchronize_completed;
3309	bp->bio_caller1 = caller1;
3310	g_io_request(bp,
3311	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
3312}
3313
3314/*
3315 * Flush some journal records to disk.
3316 */
3317static void
3318softdep_process_journal(mp, needwk, flags)
3319	struct mount *mp;
3320	struct worklist *needwk;
3321	int flags;
3322{
3323	struct jblocks *jblocks;
3324	struct ufsmount *ump;
3325	struct worklist *wk;
3326	struct jseg *jseg;
3327	struct buf *bp;
3328	struct bio *bio;
3329	uint8_t *data;
3330	struct fs *fs;
3331	int shouldflush;
3332	int segwritten;
3333	int jrecmin;	/* Minimum records per block. */
3334	int jrecmax;	/* Maximum records per block. */
3335	int size;
3336	int cnt;
3337	int off;
3338	int devbsize;
3339
3340	if (MOUNTEDSUJ(mp) == 0)
3341		return;
3342	shouldflush = softdep_flushcache;
3343	bio = NULL;
3344	jseg = NULL;
3345	ump = VFSTOUFS(mp);
3346	LOCK_OWNED(ump);
3347	fs = ump->um_fs;
3348	jblocks = ump->softdep_jblocks;
3349	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3350	/*
3351	 * We write anywhere between a disk block and fs block.  The upper
3352	 * bound is picked to prevent buffer cache fragmentation and limit
3353	 * processing time per I/O.
3354	 */
3355	jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3356	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
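	/*
	 * A worked example, assuming JREC_SIZE of 32: with a 512-byte
	 * device block and a 32768-byte fs block, jrecmin = (512 / 32) - 1
	 * = 15 records per device block (one slot per block is consumed by
	 * the segment header) and jrecmax = (32768 / 512) * 15 = 960
	 * records in a full fs-block segment.
	 */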
3357	segwritten = 0;
3358	for (;;) {
3359		cnt = ump->softdep_on_journal;
3360		/*
3361		 * Criteria for writing a segment:
3362		 * 1) We have a full block.
3363		 * 2) We're called from jwait() and haven't found the
3364		 *    journal item yet.
3365		 * 3) Always write if needseg is set.
3366		 * 4) If we are called from process_worklist and have
3367		 *    not yet written anything we write a partial block
3368		 *    to enforce a 1 second maximum latency on journal
3369		 *    entries.
3370		 */
3371		if (cnt < (jrecmax - 1) && needwk == NULL &&
3372		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3373			break;
3374		cnt++;
3375		/*
3376		 * Verify some free journal space.  softdep_prealloc() should
3377		 * guarantee that we don't run out so this is indicative of
3378		 * a problem with the flow control.  Try to recover
3379		 * gracefully in any event.
3380		 */
3381		while (jblocks->jb_free == 0) {
3382			if (flags != MNT_WAIT)
3383				break;
3384			printf("softdep: Out of journal space!\n");
3385			softdep_speedup(ump);
3386			msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz);
3387		}
3388		FREE_LOCK(ump);
3389		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3390		workitem_alloc(&jseg->js_list, D_JSEG, mp);
3391		LIST_INIT(&jseg->js_entries);
3392		LIST_INIT(&jseg->js_indirs);
3393		jseg->js_state = ATTACHED;
3394		if (shouldflush == 0)
3395			jseg->js_state |= COMPLETE;
3396		else if (bio == NULL)
3397			bio = g_alloc_bio();
3398		jseg->js_jblocks = jblocks;
3399		bp = geteblk(fs->fs_bsize, 0);
3400		ACQUIRE_LOCK(ump);
3401		/*
3402		 * If there was a race while we were allocating the block
3403		 * and jseg, the entry we care about was likely written.
3404		 * We bail out in both the WAIT and NOWAIT case and assume
3405		 * the caller will loop if the entry it cares about is
3406		 * not written.
3407		 */
3408		cnt = ump->softdep_on_journal;
3409		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3410			bp->b_flags |= B_INVAL | B_NOCACHE;
3411			WORKITEM_FREE(jseg, D_JSEG);
3412			FREE_LOCK(ump);
3413			brelse(bp);
3414			ACQUIRE_LOCK(ump);
3415			break;
3416		}
3417		/*
3418		 * Calculate the disk block size required for the available
3419		 * records rounded to the min size.
3420		 */
3421		if (cnt == 0)
3422			size = devbsize;
3423		else if (cnt < jrecmax)
3424			size = howmany(cnt, jrecmin) * devbsize;
3425		else
3426			size = fs->fs_bsize;
3427		/*
3428		 * Allocate a disk block for this journal data and account
3429		 * for truncation of the requested size if enough contiguous
3430		 * space was not available.
3431		 */
3432		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3433		bp->b_lblkno = bp->b_blkno;
3434		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3435		bp->b_bcount = size;
3436		bp->b_flags &= ~B_INVAL;
3437		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3438		/*
3439		 * Initialize our jseg with cnt records.  Assign the next
3440		 * sequence number to it and link it in-order.
3441		 */
3442		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3443		jseg->js_buf = bp;
3444		jseg->js_cnt = cnt;
3445		jseg->js_refs = cnt + 1;	/* Self ref. */
3446		jseg->js_size = size;
3447		jseg->js_seq = jblocks->jb_nextseq++;
3448		if (jblocks->jb_oldestseg == NULL)
3449			jblocks->jb_oldestseg = jseg;
3450		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3451		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3452		if (jblocks->jb_writeseg == NULL)
3453			jblocks->jb_writeseg = jseg;
3454		/*
3455		 * Start filling in records from the pending list.
3456		 */
3457		data = bp->b_data;
3458		off = 0;
3459
3460		/*
3461		 * Always put a header on the first block.
3462		 * XXX As with below, there might not be a chance to get
3463		 * into the loop.  Ensure that something valid is written.
3464		 */
3465		jseg_write(ump, jseg, data);
3466		off += JREC_SIZE;
3467		data = bp->b_data + off;
3468
3469		/*
3470		 * XXX Something is wrong here.  There's no work to do,
3471		 * but we need to perform an I/O and allow it to complete
3472		 * anyway.
3473		 */
3474		if (LIST_EMPTY(&ump->softdep_journal_pending))
3475			stat_emptyjblocks++;
3476
3477		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3478		    != NULL) {
3479			if (cnt == 0)
3480				break;
3481			/* Place a segment header on every device block. */
3482			if ((off % devbsize) == 0) {
3483				jseg_write(ump, jseg, data);
3484				off += JREC_SIZE;
3485				data = bp->b_data + off;
3486			}
3487			if (wk == needwk)
3488				needwk = NULL;
3489			remove_from_journal(wk);
3490			wk->wk_state |= INPROGRESS;
3491			WORKLIST_INSERT(&jseg->js_entries, wk);
3492			switch (wk->wk_type) {
3493			case D_JADDREF:
3494				jaddref_write(WK_JADDREF(wk), jseg, data);
3495				break;
3496			case D_JREMREF:
3497				jremref_write(WK_JREMREF(wk), jseg, data);
3498				break;
3499			case D_JMVREF:
3500				jmvref_write(WK_JMVREF(wk), jseg, data);
3501				break;
3502			case D_JNEWBLK:
3503				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3504				break;
3505			case D_JFREEBLK:
3506				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3507				break;
3508			case D_JFREEFRAG:
3509				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3510				break;
3511			case D_JTRUNC:
3512				jtrunc_write(WK_JTRUNC(wk), jseg, data);
3513				break;
3514			case D_JFSYNC:
3515				jfsync_write(WK_JFSYNC(wk), jseg, data);
3516				break;
3517			default:
3518				panic("process_journal: Unknown type %s",
3519				    TYPENAME(wk->wk_type));
3520				/* NOTREACHED */
3521			}
3522			off += JREC_SIZE;
3523			data = bp->b_data + off;
3524			cnt--;
3525		}
3526
3527		/* Clear any remaining space so we don't leak kernel data */
3528		if (size > off)
3529			bzero(data, size - off);
3530
3531		/*
3532		 * Write this one buffer and continue.
3533		 */
3534		segwritten = 1;
3535		jblocks->jb_needseg = 0;
3536		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3537		FREE_LOCK(ump);
3538		pbgetvp(ump->um_devvp, bp);
3539		/*
3540		 * We only do the blocking wait once we find the journal
3541		 * entry we're looking for.
3542		 */
3543		if (needwk == NULL && flags == MNT_WAIT)
3544			bwrite(bp);
3545		else
3546			bawrite(bp);
3547		ACQUIRE_LOCK(ump);
3548	}
3549	/*
3550	 * If we wrote a segment issue a synchronize cache so the journal
3551	 * is reflected on disk before the data is written.  Since reclaiming
3552	 * journal space also requires writing a journal record this
3553	 * process also enforces a barrier before reclamation.
3554	 */
3555	if (segwritten && shouldflush) {
3556		softdep_synchronize(bio, ump,
3557		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
3558	} else if (bio)
3559		g_destroy_bio(bio);
3560	/*
3561	 * If we've suspended the filesystem because we ran out of journal
3562	 * space either try to sync it here to make some progress or
3563	 * unsuspend it if we already have.
3564	 */
3565	if (flags == 0 && jblocks->jb_suspended) {
3566		if (journal_unsuspend(ump))
3567			return;
3568		FREE_LOCK(ump);
3569		VFS_SYNC(mp, MNT_NOWAIT);
3570		ffs_sbupdate(ump, MNT_WAIT, 0);
3571		ACQUIRE_LOCK(ump);
3572	}
3573}
3574
3575/*
3576 * Complete a jseg, allowing all dependencies awaiting journal writes
3577 * to proceed.  Each journal dependency also attaches a jsegdep to dependent
3578 * structures so that the journal segment can be freed to reclaim space.
3579 */
3580static void
3581complete_jseg(jseg)
3582	struct jseg *jseg;
3583{
3584	struct worklist *wk;
3585	struct jmvref *jmvref;
3586	int waiting;
3587#ifdef INVARIANTS
3588	int i = 0;
3589#endif
3590
3591	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3592		WORKLIST_REMOVE(wk);
3593		waiting = wk->wk_state & IOWAITING;
3594		wk->wk_state &= ~(INPROGRESS | IOWAITING);
3595		wk->wk_state |= COMPLETE;
3596		KASSERT(i++ < jseg->js_cnt,
3597		    ("complete_jseg: overflow %d >= %d",
3598		    i - 1, jseg->js_cnt));
3599		switch (wk->wk_type) {
3600		case D_JADDREF:
3601			handle_written_jaddref(WK_JADDREF(wk));
3602			break;
3603		case D_JREMREF:
3604			handle_written_jremref(WK_JREMREF(wk));
3605			break;
3606		case D_JMVREF:
3607			rele_jseg(jseg);	/* No jsegdep. */
3608			jmvref = WK_JMVREF(wk);
3609			LIST_REMOVE(jmvref, jm_deps);
3610			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3611				free_pagedep(jmvref->jm_pagedep);
3612			WORKITEM_FREE(jmvref, D_JMVREF);
3613			break;
3614		case D_JNEWBLK:
3615			handle_written_jnewblk(WK_JNEWBLK(wk));
3616			break;
3617		case D_JFREEBLK:
3618			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3619			break;
3620		case D_JTRUNC:
3621			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3622			break;
3623		case D_JFSYNC:
3624			rele_jseg(jseg);	/* No jsegdep. */
3625			WORKITEM_FREE(wk, D_JFSYNC);
3626			break;
3627		case D_JFREEFRAG:
3628			handle_written_jfreefrag(WK_JFREEFRAG(wk));
3629			break;
3630		default:
3631			panic("complete_jseg: Unknown type %s",
3632			    TYPENAME(wk->wk_type));
3633			/* NOTREACHED */
3634		}
3635		if (waiting)
3636			wakeup(wk);
3637	}
3638	/* Release the self reference so the structure may be freed. */
3639	rele_jseg(jseg);
3640}
3641
3642/*
3643 * Determine which jsegs are ready for completion processing.  Waits for
3644 * synchronize cache to complete as well as forcing in-order completion
3645 * of journal entries.
3646 */
3647static void
3648complete_jsegs(jseg)
3649	struct jseg *jseg;
3650{
3651	struct jblocks *jblocks;
3652	struct jseg *jsegn;
3653
3654	jblocks = jseg->js_jblocks;
3655	/*
3656	 * Don't allow out of order completions.  If this isn't the first
3657	 * block wait for it to write before we're done.
3658	 */
3659	if (jseg != jblocks->jb_writeseg)
3660		return;
3661	/* Iterate through available jsegs processing their entries. */
3662	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3663		jblocks->jb_oldestwrseq = jseg->js_oldseq;
3664		jsegn = TAILQ_NEXT(jseg, js_next);
3665		complete_jseg(jseg);
3666		jseg = jsegn;
3667	}
3668	jblocks->jb_writeseg = jseg;
3669	/*
3670	 * Attempt to free jsegs now that oldestwrseq may have advanced.
3671	 */
3672	free_jsegs(jblocks);
3673}
3674
3675/*
3676 * Mark a jseg as DEPCOMPLETE and throw away the buffer.  Attempt to handle
3677 * the final completions.
3678 */
3679static void
3680handle_written_jseg(jseg, bp)
3681	struct jseg *jseg;
3682	struct buf *bp;
3683{
3684
3685	if (jseg->js_refs == 0)
3686		panic("handle_written_jseg: No self-reference on %p", jseg);
3687	jseg->js_state |= DEPCOMPLETE;
3688	/*
3689	 * We'll never need this buffer again; set flags so it will be
3690	 * discarded.
3691	 */
3692	bp->b_flags |= B_INVAL | B_NOCACHE;
3693	pbrelvp(bp);
3694	complete_jsegs(jseg);
3695}
3696
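/*
 * Detach and return the jsegdep attached to an inoref (jaddref or
 * jremref) so that it may be passed on to a dependent structure.
 */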
3697static inline struct jsegdep *
3698inoref_jseg(inoref)
3699	struct inoref *inoref;
3700{
3701	struct jsegdep *jsegdep;
3702
3703	jsegdep = inoref->if_jsegdep;
3704	inoref->if_jsegdep = NULL;
3705
3706	return (jsegdep);
3707}
3708
3709/*
3710 * Called once a jremref has made it to stable store.  The jremref is marked
3711 * complete and we attempt to free it.  Any pagedeps writes sleeping waiting
3712 * complete and we attempt to free it.  Any pagedep writes sleeping on the
3713 * jremref's completion will be awoken by free_jremref.
3714static void
3715handle_written_jremref(jremref)
3716	struct jremref *jremref;
3717{
3718	struct inodedep *inodedep;
3719	struct jsegdep *jsegdep;
3720	struct dirrem *dirrem;
3721
3722	/* Grab the jsegdep. */
3723	jsegdep = inoref_jseg(&jremref->jr_ref);
3724	/*
3725	 * Remove us from the inoref list.
3726	 */
3727	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
3728	    0, &inodedep) == 0)
3729		panic("handle_written_jremref: Lost inodedep");
3730	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
3731	/*
3732	 * Complete the dirrem.
3733	 */
3734	dirrem = jremref->jr_dirrem;
3735	jremref->jr_dirrem = NULL;
3736	LIST_REMOVE(jremref, jr_deps);
3737	jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
3738	jwork_insert(&dirrem->dm_jwork, jsegdep);
3739	if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
3740	    (dirrem->dm_state & COMPLETE) != 0)
3741		add_to_worklist(&dirrem->dm_list, 0);
3742	free_jremref(jremref);
3743}
3744
3745/*
3746 * Called once a jaddref has made it to stable store.  The dependency is
3747 * marked complete and any dependent structures are added to the inode
3748 * bufwait list to be completed as soon as it is written.  If a bitmap write
3749 * depends on this entry we move the inode into the inodedephd of the
3750 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
3751 */
3752static void
3753handle_written_jaddref(jaddref)
3754	struct jaddref *jaddref;
3755{
3756	struct jsegdep *jsegdep;
3757	struct inodedep *inodedep;
3758	struct diradd *diradd;
3759	struct mkdir *mkdir;
3760
3761	/* Grab the jsegdep. */
3762	jsegdep = inoref_jseg(&jaddref->ja_ref);
3763	mkdir = NULL;
3764	diradd = NULL;
3765	if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
3766	    0, &inodedep) == 0)
3767		panic("handle_written_jaddref: Lost inodedep.");
3768	if (jaddref->ja_diradd == NULL)
3769		panic("handle_written_jaddref: No dependency");
3770	if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
3771		diradd = jaddref->ja_diradd;
3772		WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
3773	} else if (jaddref->ja_state & MKDIR_PARENT) {
3774		mkdir = jaddref->ja_mkdir;
3775		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
3776	} else if (jaddref->ja_state & MKDIR_BODY)
3777		mkdir = jaddref->ja_mkdir;
3778	else
3779		panic("handle_written_jaddref: Unknown dependency %p",
3780		    jaddref->ja_diradd);
3781	jaddref->ja_diradd = NULL;	/* also clears ja_mkdir */
3782	/*
3783	 * Remove us from the inode list.
3784	 */
3785	TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
3786	/*
3787	 * The mkdir may be waiting on the jaddref to clear before freeing.
3788	 */
3789	if (mkdir) {
3790		KASSERT(mkdir->md_list.wk_type == D_MKDIR,
3791		    ("handle_written_jaddref: Incorrect type for mkdir %s",
3792		    TYPENAME(mkdir->md_list.wk_type)));
3793		mkdir->md_jaddref = NULL;
3794		diradd = mkdir->md_diradd;
3795		mkdir->md_state |= DEPCOMPLETE;
3796		complete_mkdir(mkdir);
3797	}
3798	jwork_insert(&diradd->da_jwork, jsegdep);
3799	if (jaddref->ja_state & NEWBLOCK) {
3800		inodedep->id_state |= ONDEPLIST;
3801		LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
3802		    inodedep, id_deps);
3803	}
3804	free_jaddref(jaddref);
3805}
3806
3807/*
3808 * Called once a jnewblk journal is written.  The allocdirect or allocindir
3809 * Called once a jnewblk journal entry is written.  The allocdirect or
3810 * allocindir is placed in the bmsafemap to await notification of a written
3811 * bitmap.  If the operation was canceled, we add the jsegdep to the
3812 * appropriate dependency to free the journal space once the canceling
3813 * operation completes.
3814static void
3815handle_written_jnewblk(jnewblk)
3816	struct jnewblk *jnewblk;
3817{
3818	struct bmsafemap *bmsafemap;
3819	struct freefrag *freefrag;
3820	struct freework *freework;
3821	struct jsegdep *jsegdep;
3822	struct newblk *newblk;
3823
3824	/* Grab the jsegdep. */
3825	jsegdep = jnewblk->jn_jsegdep;
3826	jnewblk->jn_jsegdep = NULL;
3827	if (jnewblk->jn_dep == NULL)
3828		panic("handle_written_jnewblk: No dependency for the segdep.");
3829	switch (jnewblk->jn_dep->wk_type) {
3830	case D_NEWBLK:
3831	case D_ALLOCDIRECT:
3832	case D_ALLOCINDIR:
3833		/*
3834		 * Add the written block to the bmsafemap so it can
3835		 * be notified when the bitmap is on disk.
3836		 */
3837		newblk = WK_NEWBLK(jnewblk->jn_dep);
3838		newblk->nb_jnewblk = NULL;
3839		if ((newblk->nb_state & GOINGAWAY) == 0) {
3840			bmsafemap = newblk->nb_bmsafemap;
3841			newblk->nb_state |= ONDEPLIST;
3842			LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
3843			    nb_deps);
3844		}
3845		jwork_insert(&newblk->nb_jwork, jsegdep);
3846		break;
3847	case D_FREEFRAG:
3848		/*
3849		 * A new block is being removed by a freefrag because it was
3850		 * replaced by a fragment extension.
3851		 */
3852		freefrag = WK_FREEFRAG(jnewblk->jn_dep);
3853		freefrag->ff_jdep = NULL;
3854		jwork_insert(&freefrag->ff_jwork, jsegdep);
3855		break;
3856	case D_FREEWORK:
3857		/*
3858		 * A direct block was removed by truncate.
3859		 */
3860		freework = WK_FREEWORK(jnewblk->jn_dep);
3861		freework->fw_jnewblk = NULL;
3862		jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
3863		break;
3864	default:
3865		panic("handle_written_jnewblk: Unknown type %d.",
3866		    jnewblk->jn_dep->wk_type);
3867	}
3868	jnewblk->jn_dep = NULL;
3869	free_jnewblk(jnewblk);
3870}
3871
3872/*
3873 * Cancel a jfreefrag that won't be needed, probably due to colliding with
3874 * an in-flight allocation that has not yet been committed.  Divorce us
3875 * from the freefrag and mark the freefrag DEPCOMPLETE so that it may
3876 * be added to the worklist.
3877 */
3878static void
3879cancel_jfreefrag(jfreefrag)
3880	struct jfreefrag *jfreefrag;
3881{
3882	struct freefrag *freefrag;
3883
3884	if (jfreefrag->fr_jsegdep) {
3885		free_jsegdep(jfreefrag->fr_jsegdep);
3886		jfreefrag->fr_jsegdep = NULL;
3887	}
3888	freefrag = jfreefrag->fr_freefrag;
3889	jfreefrag->fr_freefrag = NULL;
3890	free_jfreefrag(jfreefrag);
3891	freefrag->ff_state |= DEPCOMPLETE;
3892	CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
3893}
3894
3895/*
3896 * Free a jfreefrag when the parent freefrag is rendered obsolete.
3897 */
3898static void
3899free_jfreefrag(jfreefrag)
3900	struct jfreefrag *jfreefrag;
3901{
3902
3903	if (jfreefrag->fr_state & INPROGRESS)
3904		WORKLIST_REMOVE(&jfreefrag->fr_list);
3905	else if (jfreefrag->fr_state & ONWORKLIST)
3906		remove_from_journal(&jfreefrag->fr_list);
3907	if (jfreefrag->fr_freefrag != NULL)
3908		panic("free_jfreefrag:  Still attached to a freefrag.");
3909	WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
3910}
3911
3912/*
3913 * Called when the journal write for a jfreefrag completes.  The parent
3914 * freefrag is added to the worklist if this completes its dependencies.
3915 */
3916static void
3917handle_written_jfreefrag(jfreefrag)
3918	struct jfreefrag *jfreefrag;
3919{
3920	struct jsegdep *jsegdep;
3921	struct freefrag *freefrag;
3922
3923	/* Grab the jsegdep. */
3924	jsegdep = jfreefrag->fr_jsegdep;
3925	jfreefrag->fr_jsegdep = NULL;
3926	freefrag = jfreefrag->fr_freefrag;
3927	if (freefrag == NULL)
3928		panic("handle_written_jfreefrag: No freefrag.");
3929	freefrag->ff_state |= DEPCOMPLETE;
3930	freefrag->ff_jdep = NULL;
3931	jwork_insert(&freefrag->ff_jwork, jsegdep);
3932	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
3933		add_to_worklist(&freefrag->ff_list, 0);
3934	jfreefrag->fr_freefrag = NULL;
3935	free_jfreefrag(jfreefrag);
3936}
3937
3938/*
3939 * Called when the journal write for a jfreeblk completes.  The jfreeblk
3940 * is removed from the freeblks list of pending journal writes and the
3941 * jsegdep is moved to the freeblks jwork to be completed when all blocks
3942 * have been reclaimed.
3943 */
3944static void
3945handle_written_jblkdep(jblkdep)
3946	struct jblkdep *jblkdep;
3947{
3948	struct freeblks *freeblks;
3949	struct jsegdep *jsegdep;
3950
3951	/* Grab the jsegdep. */
3952	jsegdep = jblkdep->jb_jsegdep;
3953	jblkdep->jb_jsegdep = NULL;
3954	freeblks = jblkdep->jb_freeblks;
3955	LIST_REMOVE(jblkdep, jb_deps);
3956	jwork_insert(&freeblks->fb_jwork, jsegdep);
3957	/*
3958	 * If the freeblks is all journaled, we can add it to the worklist.
3959	 */
3960	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
3961	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
3962		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
3963
3964	free_jblkdep(jblkdep);
3965}
3966
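/*
 * Allocate a jsegdep to track a dependency on journal space.  jd_seg is
 * left NULL here and is filled in later, when the associated record has
 * been assigned to a journal segment.
 */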
3967static struct jsegdep *
3968newjsegdep(struct worklist *wk)
3969{
3970	struct jsegdep *jsegdep;
3971
3972	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
3973	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
3974	jsegdep->jd_seg = NULL;
3975
3976	return (jsegdep);
3977}
3978
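/*
 * Allocate a new jmvref to journal the move of directory entry "ino"
 * within directory "dp" from offset oldoff to offset newoff.  The record
 * is created DEPCOMPLETE since nothing further must happen before it may
 * be written.
 */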
3979static struct jmvref *
3980newjmvref(dp, ino, oldoff, newoff)
3981	struct inode *dp;
3982	ino_t ino;
3983	off_t oldoff;
3984	off_t newoff;
3985{
3986	struct jmvref *jmvref;
3987
3988	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
3989	workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump));
3990	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
3991	jmvref->jm_parent = dp->i_number;
3992	jmvref->jm_ino = ino;
3993	jmvref->jm_oldoff = oldoff;
3994	jmvref->jm_newoff = newoff;
3995
3996	return (jmvref);
3997}
3998
3999/*
4000 * Allocate a new jremref that tracks the removal of ip from dp with the
4001 * directory entry offset of diroff.  Mark the entry as ATTACHED and
4002 * DEPCOMPLETE as we have all the information required for the journal write
4003 * and the directory has already been removed from the buffer.  The caller
4004 * is responsible for linking the jremref into the pagedep and adding it
4005 * to the journal to write.  The MKDIR_PARENT flag is set if we're doing
4006 * a DOTDOT addition so handle_workitem_remove() can properly assign
4007 * the jsegdep when we're done.
4008 */
4009static struct jremref *
4010newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
4011    off_t diroff, nlink_t nlink)
4012{
4013	struct jremref *jremref;
4014
4015	jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
4016	workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
4017	jremref->jr_state = ATTACHED;
4018	newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
4019	   nlink, ip->i_mode);
4020	jremref->jr_dirrem = dirrem;
4021
4022	return (jremref);
4023}
4024
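/*
 * Initialize the inoref fields common to jaddref and jremref records and
 * allocate the jsegdep that will be handed to a dependent structure once
 * the journal write completes.
 */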
4025static inline void
4026newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
4027    nlink_t nlink, uint16_t mode)
4028{
4029
4030	inoref->if_jsegdep = newjsegdep(&inoref->if_list);
4031	inoref->if_diroff = diroff;
4032	inoref->if_ino = ino;
4033	inoref->if_parent = parent;
4034	inoref->if_nlink = nlink;
4035	inoref->if_mode = mode;
4036}
4037
4038/*
4039 * Allocate a new jaddref to track the addition of ino to dp at diroff.  The
4040 * directory offset may not be known until later.  The caller is responsible
4041 * for adding the entry to the journal when this information is available.  nlink
4042 * should be the link count prior to the addition and mode is only required
4043 * to have the correct FMT.
4044 */
4045static struct jaddref *
4046newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
4047    uint16_t mode)
4048{
4049	struct jaddref *jaddref;
4050
4051	jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
4052	workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
4053	jaddref->ja_state = ATTACHED;
4054	jaddref->ja_mkdir = NULL;
4055	newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
4056
4057	return (jaddref);
4058}
4059
4060/*
4061 * Create a new free dependency for a freework.  The caller is responsible
4062 * for adjusting the reference count when it has the lock held.  The freedep
4063 * will track an outstanding bitmap write that will ultimately clear the
4064 * freework to continue.
4065 */
4066static struct freedep *
4067newfreedep(struct freework *freework)
4068{
4069	struct freedep *freedep;
4070
4071	freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
4072	workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
4073	freedep->fd_freework = freework;
4074
4075	return (freedep);
4076}
4077
4078/*
4079 * Free a freedep structure once the buffer it is linked to is written.  If
4080 * this is the last reference to the freework schedule it for completion.
4081 */
4082static void
4083free_freedep(freedep)
4084	struct freedep *freedep;
4085{
4086	struct freework *freework;
4087
4088	freework = freedep->fd_freework;
4089	freework->fw_freeblks->fb_cgwait--;
4090	if (--freework->fw_ref == 0)
4091		freework_enqueue(freework);
4092	WORKITEM_FREE(freedep, D_FREEDEP);
4093}
4094
4095/*
4096 * Allocate a new freework structure: a level of an indirect hierarchy
4097 * when parent is not NULL, or a top level block when it is NULL.  The top level
4098 * freework structures are allocated without the per-filesystem lock held
4099 * and before the freeblks is visible outside of softdep_setup_freeblocks().
4100 */
4101static struct freework *
4102newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal)
4103	struct ufsmount *ump;
4104	struct freeblks *freeblks;
4105	struct freework *parent;
4106	ufs_lbn_t lbn;
4107	ufs2_daddr_t nb;
4108	int frags;
4109	int off;
4110	int journal;
4111{
4112	struct freework *freework;
4113
4114	freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
4115	workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
4116	freework->fw_state = ATTACHED;
4117	freework->fw_jnewblk = NULL;
4118	freework->fw_freeblks = freeblks;
4119	freework->fw_parent = parent;
4120	freework->fw_lbn = lbn;
4121	freework->fw_blkno = nb;
4122	freework->fw_frags = frags;
4123	freework->fw_indir = NULL;
4124	freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR)
4125		? 0 : NINDIR(ump->um_fs) + 1;
4126	freework->fw_start = freework->fw_off = off;
4127	if (journal)
4128		newjfreeblk(freeblks, lbn, nb, frags);
4129	if (parent == NULL) {
4130		ACQUIRE_LOCK(ump);
4131		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
4132		freeblks->fb_ref++;
4133		FREE_LOCK(ump);
4134	}
4135
4136	return (freework);
4137}
4138
4139/*
4140 * Eliminate a jfreeblk for a block that does not need journaling.
4141 */
4142static void
4143cancel_jfreeblk(freeblks, blkno)
4144	struct freeblks *freeblks;
4145	ufs2_daddr_t blkno;
4146{
4147	struct jfreeblk *jfreeblk;
4148	struct jblkdep *jblkdep;
4149
4150	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4151		if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4152			continue;
4153		jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4154		if (jfreeblk->jf_blkno == blkno)
4155			break;
4156	}
4157	if (jblkdep == NULL)
4158		return;
4159	CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4160	free_jsegdep(jblkdep->jb_jsegdep);
4161	LIST_REMOVE(jblkdep, jb_deps);
4162	WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4163}
4164
4165/*
4166 * Allocate a new jfreeblk to journal a top level block pointer when
4167 * truncating a file.  The caller must add this to the worklist while
4168 * the per-filesystem lock is held.
4169 */
4170static struct jfreeblk *
4171newjfreeblk(freeblks, lbn, blkno, frags)
4172	struct freeblks *freeblks;
4173	ufs_lbn_t lbn;
4174	ufs2_daddr_t blkno;
4175	int frags;
4176{
4177	struct jfreeblk *jfreeblk;
4178
4179	jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4180	workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4181	    freeblks->fb_list.wk_mp);
4182	jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4183	jfreeblk->jf_dep.jb_freeblks = freeblks;
4184	jfreeblk->jf_ino = freeblks->fb_inum;
4185	jfreeblk->jf_lbn = lbn;
4186	jfreeblk->jf_blkno = blkno;
4187	jfreeblk->jf_frags = frags;
4188	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4189
4190	return (jfreeblk);
4191}
4192
4193/*
4194 * The journal is only prepared to handle full-size block numbers, so we
4195 * have to adjust the record to reflect the change to a full-size block.
4196 * For example, suppose we have a block made up of fragments 8-15 and
4197 * want to free its last two fragments. We are given a request that says:
4198 *     FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0
4199 * where frags are the number of fragments to free and oldfrags are the
4200 * number of fragments to keep. To block align it, we have to change it to
4201 * have a valid full-size blkno, so it becomes:
4202 *     FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6
4203 */
4204static void
4205adjust_newfreework(freeblks, frag_offset)
4206	struct freeblks *freeblks;
4207	int frag_offset;
4208{
4209	struct jfreeblk *jfreeblk;
4210
4211	KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL &&
4212	    LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK),
4213	    ("adjust_newfreework: Missing freeblks dependency"));
4214
4215	jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd));
4216	jfreeblk->jf_blkno -= frag_offset;
4217	jfreeblk->jf_frags += frag_offset;
4218}
4219
4220/*
4221 * Allocate a new jtrunc to track a partial truncation.
4222 */
4223static struct jtrunc *
4224newjtrunc(freeblks, size, extsize)
4225	struct freeblks *freeblks;
4226	off_t size;
4227	int extsize;
4228{
4229	struct jtrunc *jtrunc;
4230
4231	jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4232	workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4233	    freeblks->fb_list.wk_mp);
4234	jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4235	jtrunc->jt_dep.jb_freeblks = freeblks;
4236	jtrunc->jt_ino = freeblks->fb_inum;
4237	jtrunc->jt_size = size;
4238	jtrunc->jt_extsize = extsize;
4239	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4240
4241	return (jtrunc);
4242}
4243
4244/*
4245 * If we're canceling a new bitmap we have to search for another ref
4246 * to move into the bmsafemap dep.  This might be better expressed
4247 * with another structure.
4248 */
4249static void
4250move_newblock_dep(jaddref, inodedep)
4251	struct jaddref *jaddref;
4252	struct inodedep *inodedep;
4253{
4254	struct inoref *inoref;
4255	struct jaddref *jaddrefn;
4256
4257	jaddrefn = NULL;
4258	for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4259	    inoref = TAILQ_NEXT(inoref, if_deps)) {
4260		if ((jaddref->ja_state & NEWBLOCK) &&
4261		    inoref->if_list.wk_type == D_JADDREF) {
4262			jaddrefn = (struct jaddref *)inoref;
4263			break;
4264		}
4265	}
4266	if (jaddrefn == NULL)
4267		return;
4268	jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4269	jaddrefn->ja_state |= jaddref->ja_state &
4270	    (ATTACHED | UNDONE | NEWBLOCK);
4271	jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4272	jaddref->ja_state |= ATTACHED;
4273	LIST_REMOVE(jaddref, ja_bmdeps);
4274	LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4275	    ja_bmdeps);
4276}
4277
4278/*
4279 * Cancel a jaddref either before it has been written or while it is being
4280 * written.  This happens when a link is removed before the add reaches
4281 * the disk.  The jaddref dependency is kept linked into the bmsafemap
4282 * and inode to prevent the link count or bitmap from reaching the disk
4283 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4284 * required.
4285 *
4286 * Returns 1 if the canceled addref requires journaling of the remove and
4287 * 0 otherwise.
4288 */
4289static int
4290cancel_jaddref(jaddref, inodedep, wkhd)
4291	struct jaddref *jaddref;
4292	struct inodedep *inodedep;
4293	struct workhead *wkhd;
4294{
4295	struct inoref *inoref;
4296	struct jsegdep *jsegdep;
4297	int needsj;
4298
4299	KASSERT((jaddref->ja_state & COMPLETE) == 0,
4300	    ("cancel_jaddref: Canceling complete jaddref"));
4301	if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4302		needsj = 1;
4303	else
4304		needsj = 0;
4305	if (inodedep == NULL)
4306		if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4307		    0, &inodedep) == 0)
4308			panic("cancel_jaddref: Lost inodedep");
4309	/*
4310	 * We must adjust the nlink of any reference operation that follows
4311	 * us so that it is consistent with the in-memory reference.  This
4312	 * ensures that inode nlink rollbacks always have the correct link.
4313	 */
4314	if (needsj == 0) {
4315		for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4316		    inoref = TAILQ_NEXT(inoref, if_deps)) {
4317			if (inoref->if_state & GOINGAWAY)
4318				break;
4319			inoref->if_nlink--;
4320		}
4321	}
4322	jsegdep = inoref_jseg(&jaddref->ja_ref);
4323	if (jaddref->ja_state & NEWBLOCK)
4324		move_newblock_dep(jaddref, inodedep);
4325	wake_worklist(&jaddref->ja_list);
4326	jaddref->ja_mkdir = NULL;
4327	if (jaddref->ja_state & INPROGRESS) {
4328		jaddref->ja_state &= ~INPROGRESS;
4329		WORKLIST_REMOVE(&jaddref->ja_list);
4330		jwork_insert(wkhd, jsegdep);
4331	} else {
4332		free_jsegdep(jsegdep);
4333		if (jaddref->ja_state & DEPCOMPLETE)
4334			remove_from_journal(&jaddref->ja_list);
4335	}
4336	jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4337	/*
4338	 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4339	 * can arrange for them to be freed with the bitmap.  Otherwise we
4340	 * no longer need this addref attached to the inoreflst and it
4341	 * will incorrectly adjust nlink if we leave it.
4342	 */
4343	if ((jaddref->ja_state & NEWBLOCK) == 0) {
4344		TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4345		    if_deps);
4346		jaddref->ja_state |= COMPLETE;
4347		free_jaddref(jaddref);
4348		return (needsj);
4349	}
4350	/*
4351	 * Leave the head of the list for jsegdeps for fast merging.
4352	 */
4353	if (LIST_FIRST(wkhd) != NULL) {
4354		jaddref->ja_state |= ONWORKLIST;
4355		LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4356	} else
4357		WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4358
4359	return (needsj);
4360}
4361
4362/*
4363 * Attempt to free a jaddref structure when some work completes.  This
4364 * should only succeed once the entry is written and all dependencies have
4365 * been notified.
4366 */
4367static void
4368free_jaddref(jaddref)
4369	struct jaddref *jaddref;
4370{
4371
4372	if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4373		return;
4374	if (jaddref->ja_ref.if_jsegdep)
4375		panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4376		    jaddref, jaddref->ja_state);
4377	if (jaddref->ja_state & NEWBLOCK)
4378		LIST_REMOVE(jaddref, ja_bmdeps);
4379	if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4380		panic("free_jaddref: Bad state %p(0x%X)",
4381		    jaddref, jaddref->ja_state);
4382	if (jaddref->ja_mkdir != NULL)
4383		panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4384	WORKITEM_FREE(jaddref, D_JADDREF);
4385}
4386
4387/*
4388 * Free a jremref structure once it has been written or discarded.
4389 */
4390static void
4391free_jremref(jremref)
4392	struct jremref *jremref;
4393{
4394
4395	if (jremref->jr_ref.if_jsegdep)
4396		free_jsegdep(jremref->jr_ref.if_jsegdep);
4397	if (jremref->jr_state & INPROGRESS)
4398		panic("free_jremref: IO still pending");
4399	WORKITEM_FREE(jremref, D_JREMREF);
4400}
4401
4402/*
4403 * Free a jnewblk structure.
4404 */
4405static void
4406free_jnewblk(jnewblk)
4407	struct jnewblk *jnewblk;
4408{
4409
4410	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4411		return;
4412	LIST_REMOVE(jnewblk, jn_deps);
4413	if (jnewblk->jn_dep != NULL)
4414		panic("free_jnewblk: Dependency still attached.");
4415	WORKITEM_FREE(jnewblk, D_JNEWBLK);
4416}
4417
4418/*
4419 * Cancel a jnewblk which has been made redundant by frag extension.
4420 */
4421static void
4422cancel_jnewblk(jnewblk, wkhd)
4423	struct jnewblk *jnewblk;
4424	struct workhead *wkhd;
4425{
4426	struct jsegdep *jsegdep;
4427
4428	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4429	jsegdep = jnewblk->jn_jsegdep;
4430	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4431		panic("cancel_jnewblk: Invalid state");
4432	jnewblk->jn_jsegdep = NULL;
4433	jnewblk->jn_dep = NULL;
4434	jnewblk->jn_state |= GOINGAWAY;
4435	if (jnewblk->jn_state & INPROGRESS) {
4436		jnewblk->jn_state &= ~INPROGRESS;
4437		WORKLIST_REMOVE(&jnewblk->jn_list);
4438		jwork_insert(wkhd, jsegdep);
4439	} else {
4440		free_jsegdep(jsegdep);
4441		remove_from_journal(&jnewblk->jn_list);
4442	}
4443	wake_worklist(&jnewblk->jn_list);
4444	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4445}
4446
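/*
 * Free a jblkdep according to the type of structure (jfreeblk or jtrunc)
 * that embeds it.
 */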
4447static void
4448free_jblkdep(jblkdep)
4449	struct jblkdep *jblkdep;
4450{
4451
4452	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4453		WORKITEM_FREE(jblkdep, D_JFREEBLK);
4454	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4455		WORKITEM_FREE(jblkdep, D_JTRUNC);
4456	else
4457		panic("free_jblkdep: Unexpected type %s",
4458		    TYPENAME(jblkdep->jb_list.wk_type));
4459}
4460
4461/*
4462 * Free a single jseg once it is no longer referenced in memory or on
4463 * disk.  Reclaim journal blocks and dependencies waiting for the segment
4464 * to disappear.
4465 */
4466static void
4467free_jseg(jseg, jblocks)
4468	struct jseg *jseg;
4469	struct jblocks *jblocks;
4470{
4471	struct freework *freework;
4472
4473	/*
4474	 * Free freework structures that were lingering to indicate freed
4475	 * indirect blocks that forced journal write ordering on reallocate.
4476	 */
4477	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4478		indirblk_remove(freework);
4479	if (jblocks->jb_oldestseg == jseg)
4480		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4481	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4482	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4483	KASSERT(LIST_EMPTY(&jseg->js_entries),
4484	    ("free_jseg: Freed jseg has valid entries."));
4485	WORKITEM_FREE(jseg, D_JSEG);
4486}
4487
4488/*
4489 * Free all jsegs that meet the criteria for being reclaimed and update
4490 * oldestseg.
4491 */
4492static void
4493free_jsegs(jblocks)
4494	struct jblocks *jblocks;
4495{
4496	struct jseg *jseg;
4497
4498	/*
4499	 * Free only those jsegs which have no allocated jsegs before them,
4500	 * to preserve the journal space ordering.
4501	 */
4502	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4503		/*
4504		 * Only reclaim space when nothing depends on this journal
4505		 * set and another set has written that it is no longer
4506		 * valid.
4507		 */
4508		if (jseg->js_refs != 0) {
4509			jblocks->jb_oldestseg = jseg;
4510			return;
4511		}
4512		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4513			break;
4514		if (jseg->js_seq > jblocks->jb_oldestwrseq)
4515			break;
4516		/*
4517		 * We can free jsegs that didn't write entries when
4518		 * oldestwrseq == js_seq.
4519		 */
4520		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4521		    jseg->js_cnt != 0)
4522			break;
4523		free_jseg(jseg, jblocks);
4524	}
4525	/*
4526	 * If we exited the loop above we still must discover the
4527	 * oldest valid segment.
4528	 */
4529	if (jseg)
4530		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4531		     jseg = TAILQ_NEXT(jseg, js_next))
4532			if (jseg->js_refs != 0)
4533				break;
4534	jblocks->jb_oldestseg = jseg;
4535	/*
4536	 * The journal has no valid records but some jsegs may still be
4537	 * waiting on oldestwrseq to advance.  We force a small record
4538	 * out to permit these lingering records to be reclaimed.
4539	 */
4540	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4541		jblocks->jb_needseg = 1;
4542}
4543
4544/*
4545 * Release one reference to a jseg and free it if the count reaches 0.  This
4546 * should eventually reclaim journal space as well.
4547 */
4548static void
4549rele_jseg(jseg)
4550	struct jseg *jseg;
4551{
4552
4553	KASSERT(jseg->js_refs > 0,
4554	    ("rele_jseg: Invalid refcnt %d", jseg->js_refs));
4555	if (--jseg->js_refs != 0)
4556		return;
4557	free_jsegs(jseg->js_jblocks);
4558}
4559
4560/*
4561 * Release a jsegdep and decrement the jseg count.
4562 */
4563static void
4564free_jsegdep(jsegdep)
4565	struct jsegdep *jsegdep;
4566{
4567
4568	if (jsegdep->jd_seg)
4569		rele_jseg(jsegdep->jd_seg);
4570	WORKITEM_FREE(jsegdep, D_JSEGDEP);
4571}
4572
4573/*
4574 * Wait for a journal item to make it to disk.  Initiate journal processing
4575 * if required.
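 *
 * A non-blocking caller (waitfor != MNT_WAIT) gets EBUSY back and is
 * expected to re-issue the request later, e.g. (hypothetical sketch):
 *
 *	if (jwait(&jremref->jr_list, waitfor) == EBUSY)
 *		return (EBUSY);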
4576 */
4577static int
4578jwait(wk, waitfor)
4579	struct worklist *wk;
4580	int waitfor;
4581{
4582
4583	LOCK_OWNED(VFSTOUFS(wk->wk_mp));
4584	/*
4585	 * Blocking journal waits cause slow synchronous behavior.  Record
4586	 * stats on the frequency of these blocking operations.
4587	 */
4588	if (waitfor == MNT_WAIT) {
4589		stat_journal_wait++;
4590		switch (wk->wk_type) {
4591		case D_JREMREF:
4592		case D_JMVREF:
4593			stat_jwait_filepage++;
4594			break;
4595		case D_JTRUNC:
4596		case D_JFREEBLK:
4597			stat_jwait_freeblks++;
4598			break;
4599		case D_JNEWBLK:
4600			stat_jwait_newblk++;
4601			break;
4602		case D_JADDREF:
4603			stat_jwait_inode++;
4604			break;
4605		default:
4606			break;
4607		}
4608	}
4609	/*
4610	 * If IO has not started we process the journal.  We can't mark the
4611	 * worklist item as IOWAITING because we drop the lock while
4612	 * processing the journal and the worklist entry may be freed after
4613	 * this point.  The caller may call back in and re-issue the request.
4614	 */
4615	if ((wk->wk_state & INPROGRESS) == 0) {
4616		softdep_process_journal(wk->wk_mp, wk, waitfor);
4617		if (waitfor != MNT_WAIT)
4618			return (EBUSY);
4619		return (0);
4620	}
4621	if (waitfor != MNT_WAIT)
4622		return (EBUSY);
4623	wait_worklist(wk, "jwait");
4624	return (0);
4625}
4626
4627/*
4628 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4629 * appropriate.  This is a convenience function to reduce duplicate code
4630 * for the setup and revert functions below.
4631 */
4632static struct inodedep *
4633inodedep_lookup_ip(ip)
4634	struct inode *ip;
4635{
4636	struct inodedep *inodedep;
4637
4638	KASSERT(ip->i_nlink >= ip->i_effnlink,
4639	    ("inodedep_lookup_ip: bad delta"));
4640	(void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
4641	    &inodedep);
4642	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4643	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4644
4645	return (inodedep);
4646}
4647
4648/*
4649 * Called prior to creating a new inode and linking it to a directory.  The
4650 * jaddref structure must already be allocated by softdep_setup_inomapdep
4651 * and it is discovered here so we can initialize the mode and update
4652 * nlinkdelta.
4653 */
4654void
4655softdep_setup_create(dp, ip)
4656	struct inode *dp;
4657	struct inode *ip;
4658{
4659	struct inodedep *inodedep;
4660	struct jaddref *jaddref;
4661	struct vnode *dvp;
4662
4663	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4664	    ("softdep_setup_create called on non-softdep filesystem"));
4665	KASSERT(ip->i_nlink == 1,
4666	    ("softdep_setup_create: Invalid link count."));
4667	dvp = ITOV(dp);
4668	ACQUIRE_LOCK(dp->i_ump);
4669	inodedep = inodedep_lookup_ip(ip);
4670	if (DOINGSUJ(dvp)) {
4671		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4672		    inoreflst);
4673		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4674		    ("softdep_setup_create: No addref structure present."));
4675	}
4676	softdep_prelink(dvp, NULL);
4677	FREE_LOCK(dp->i_ump);
4678}
4679
4680/*
4681 * Create a jaddref structure to track the addition of a DOTDOT link when
4682 * we are reparenting an inode as part of a rename.  This jaddref will be
4683 * found by softdep_setup_directory_change.  Adjusts nlinkdelta for
4684 * non-journaling softdep.
4685 */
4686void
4687softdep_setup_dotdot_link(dp, ip)
4688	struct inode *dp;
4689	struct inode *ip;
4690{
4691	struct inodedep *inodedep;
4692	struct jaddref *jaddref;
4693	struct vnode *dvp;
4694
4695	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4696	    ("softdep_setup_dotdot_link called on non-softdep filesystem"));
4697	dvp = ITOV(dp);
4698	jaddref = NULL;
4699	/*
4700	 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4701	 * is used as a normal link would be.
4702	 */
4703	if (DOINGSUJ(dvp))
4704		jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4705		    dp->i_effnlink - 1, dp->i_mode);
4706	ACQUIRE_LOCK(dp->i_ump);
4707	inodedep = inodedep_lookup_ip(dp);
4708	if (jaddref)
4709		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4710		    if_deps);
4711	softdep_prelink(dvp, ITOV(ip));
4712	FREE_LOCK(dp->i_ump);
4713}
4714
4715/*
4716 * Create a jaddref structure to track a new link to an inode.  The directory
4717 * offset is not known until softdep_setup_directory_add or
4718 * softdep_setup_directory_change.  Adjusts nlinkdelta for non-journaling
4719 * softdep.
4720 */
4721void
4722softdep_setup_link(dp, ip)
4723	struct inode *dp;
4724	struct inode *ip;
4725{
4726	struct inodedep *inodedep;
4727	struct jaddref *jaddref;
4728	struct vnode *dvp;
4729
4730	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4731	    ("softdep_setup_link called on non-softdep filesystem"));
4732	dvp = ITOV(dp);
4733	jaddref = NULL;
4734	if (DOINGSUJ(dvp))
4735		jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4736		    ip->i_mode);
4737	ACQUIRE_LOCK(dp->i_ump);
4738	inodedep = inodedep_lookup_ip(ip);
4739	if (jaddref)
4740		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4741		    if_deps);
4742	softdep_prelink(dvp, ITOV(ip));
4743	FREE_LOCK(dp->i_ump);
4744}
4745
4746/*
4747 * Called to create the jaddref structures to track . and .. references as
4748 * well as lookup and further initialize the incomplete jaddref created
4749 * by softdep_setup_inomapdep when the inode was allocated.  Adjusts
4750 * nlinkdelta for non-journaling softdep.
4751 */
4752void
4753softdep_setup_mkdir(dp, ip)
4754	struct inode *dp;
4755	struct inode *ip;
4756{
4757	struct inodedep *inodedep;
4758	struct jaddref *dotdotaddref;
4759	struct jaddref *dotaddref;
4760	struct jaddref *jaddref;
4761	struct vnode *dvp;
4762
4763	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4764	    ("softdep_setup_mkdir called on non-softdep filesystem"));
4765	dvp = ITOV(dp);
4766	dotaddref = dotdotaddref = NULL;
4767	if (DOINGSUJ(dvp)) {
4768		dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
4769		    ip->i_mode);
4770		dotaddref->ja_state |= MKDIR_BODY;
4771		dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4772		    dp->i_effnlink - 1, dp->i_mode);
4773		dotdotaddref->ja_state |= MKDIR_PARENT;
4774	}
4775	ACQUIRE_LOCK(dp->i_ump);
4776	inodedep = inodedep_lookup_ip(ip);
4777	if (DOINGSUJ(dvp)) {
4778		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4779		    inoreflst);
4780		KASSERT(jaddref != NULL,
4781		    ("softdep_setup_mkdir: No addref structure present."));
4782		KASSERT(jaddref->ja_parent == dp->i_number,
4783		    ("softdep_setup_mkdir: bad parent %ju",
4784		    (uintmax_t)jaddref->ja_parent));
4785		TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
4786		    if_deps);
4787	}
4788	inodedep = inodedep_lookup_ip(dp);
4789	if (DOINGSUJ(dvp))
4790		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
4791		    &dotdotaddref->ja_ref, if_deps);
4792	softdep_prelink(ITOV(dp), NULL);
4793	FREE_LOCK(dp->i_ump);
4794}
4795
4796/*
4797 * Called to track nlinkdelta of the inode and parent directories prior to
4798 * unlinking a directory.
4799 */
4800void
4801softdep_setup_rmdir(dp, ip)
4802	struct inode *dp;
4803	struct inode *ip;
4804{
4805	struct vnode *dvp;
4806
4807	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4808	    ("softdep_setup_rmdir called on non-softdep filesystem"));
4809	dvp = ITOV(dp);
4810	ACQUIRE_LOCK(dp->i_ump);
4811	(void) inodedep_lookup_ip(ip);
4812	(void) inodedep_lookup_ip(dp);
4813	softdep_prelink(dvp, ITOV(ip));
4814	FREE_LOCK(dp->i_ump);
4815}
4816
4817/*
4818 * Called to track nlinkdelta of the inode and parent directories prior to
4819 * unlink.
4820 */
4821void
4822softdep_setup_unlink(dp, ip)
4823	struct inode *dp;
4824	struct inode *ip;
4825{
4826	struct vnode *dvp;
4827
4828	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4829	    ("softdep_setup_unlink called on non-softdep filesystem"));
4830	dvp = ITOV(dp);
4831	ACQUIRE_LOCK(dp->i_ump);
4832	(void) inodedep_lookup_ip(ip);
4833	(void) inodedep_lookup_ip(dp);
4834	softdep_prelink(dvp, ITOV(ip));
4835	FREE_LOCK(dp->i_ump);
4836}
4837
4838/*
4839 * Called to release the journal structures created by a failed non-directory
4840 * creation.  Adjusts nlinkdelta for non-journaling softdep.
4841 */
4842void
4843softdep_revert_create(dp, ip)
4844	struct inode *dp;
4845	struct inode *ip;
4846{
4847	struct inodedep *inodedep;
4848	struct jaddref *jaddref;
4849	struct vnode *dvp;
4850
4851	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4852	    ("softdep_revert_create called on non-softdep filesystem"));
4853	dvp = ITOV(dp);
4854	ACQUIRE_LOCK(dp->i_ump);
4855	inodedep = inodedep_lookup_ip(ip);
4856	if (DOINGSUJ(dvp)) {
4857		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4858		    inoreflst);
4859		KASSERT(jaddref->ja_parent == dp->i_number,
4860		    ("softdep_revert_create: addref parent mismatch"));
4861		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4862	}
4863	FREE_LOCK(dp->i_ump);
4864}
4865
4866/*
4867 * Called to release the journal structures created by a failed link
4868 * addition.  Adjusts nlinkdelta for non-journaling softdep.
4869 */
4870void
4871softdep_revert_link(dp, ip)
4872	struct inode *dp;
4873	struct inode *ip;
4874{
4875	struct inodedep *inodedep;
4876	struct jaddref *jaddref;
4877	struct vnode *dvp;
4878
4879	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4880	    ("softdep_revert_link called on non-softdep filesystem"));
4881	dvp = ITOV(dp);
4882	ACQUIRE_LOCK(dp->i_ump);
4883	inodedep = inodedep_lookup_ip(ip);
4884	if (DOINGSUJ(dvp)) {
4885		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4886		    inoreflst);
4887		KASSERT(jaddref->ja_parent == dp->i_number,
4888		    ("softdep_revert_link: addref parent mismatch"));
4889		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4890	}
4891	FREE_LOCK(dp->i_ump);
4892}
4893
4894/*
4895 * Called to release the journal structures created by a failed mkdir
4896 * attempt.  Adjusts nlinkdelta for non-journaling softdep.
4897 */
4898void
4899softdep_revert_mkdir(dp, ip)
4900	struct inode *dp;
4901	struct inode *ip;
4902{
4903	struct inodedep *inodedep;
4904	struct jaddref *jaddref;
4905	struct jaddref *dotaddref;
4906	struct vnode *dvp;
4907
4908	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4909	    ("softdep_revert_mkdir called on non-softdep filesystem"));
4910	dvp = ITOV(dp);
4911
4912	ACQUIRE_LOCK(dp->i_ump);
4913	inodedep = inodedep_lookup_ip(dp);
4914	if (DOINGSUJ(dvp)) {
4915		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4916		    inoreflst);
4917		KASSERT(jaddref->ja_parent == ip->i_number,
4918		    ("softdep_revert_mkdir: dotdot addref parent mismatch"));
4919		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4920	}
4921	inodedep = inodedep_lookup_ip(ip);
4922	if (DOINGSUJ(dvp)) {
4923		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4924		    inoreflst);
4925		KASSERT(jaddref->ja_parent == dp->i_number,
4926		    ("softdep_revert_mkdir: addref parent mismatch"));
4927		dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
4928		    inoreflst, if_deps);
4929		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4930		KASSERT(dotaddref->ja_parent == ip->i_number,
4931		    ("softdep_revert_mkdir: dot addref parent mismatch"));
4932		cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
4933	}
4934	FREE_LOCK(dp->i_ump);
4935}
4936
4937/*
4938 * Called to correct nlinkdelta after a failed rmdir.
4939 */
4940void
4941softdep_revert_rmdir(dp, ip)
4942	struct inode *dp;
4943	struct inode *ip;
4944{
4945
4946	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4947	    ("softdep_revert_rmdir called on non-softdep filesystem"));
4948	ACQUIRE_LOCK(dp->i_ump);
4949	(void) inodedep_lookup_ip(ip);
4950	(void) inodedep_lookup_ip(dp);
4951	FREE_LOCK(dp->i_ump);
4952}
4953
4954/*
4955 * Protecting the freemaps (or bitmaps).
4956 *
4957 * To eliminate the need to execute fsck before mounting a filesystem
4958 * after a power failure, one must (conservatively) guarantee that the
4959 * on-disk copy of the bitmaps never indicates that a live inode or block is
4960 * free.  So, when a block or inode is allocated, the bitmap should be
4961 * updated (on disk) before any new pointers.  When a block or inode is
4962 * freed, the bitmap should not be updated until all pointers have been
4963 * reset.  The latter dependency is handled by the delayed de-allocation
4964 * approach described below for block and inode de-allocation.  The former
4965 * dependency is handled by calling the following procedure when a block or
4966 * inode is allocated. When an inode is allocated, an "inodedep" is created
4967 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
4968 * Each "inodedep" is also inserted into the hash indexing structure so
4969 * that any additional link additions can be made dependent on the inode
4970 * allocation.
4971 *
4972 * The ufs filesystem maintains a number of free block counts (e.g., per
4973 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
4974 * in addition to the bitmaps.  These counts are used to improve efficiency
4975 * during allocation and therefore must be consistent with the bitmaps.
4976 * There is no convenient way to guarantee post-crash consistency of these
4977 * counts with simple update ordering, for two main reasons: (1) The counts
4978 * and bitmaps for a single cylinder group block are not in the same disk
4979 * sector.  If a disk write is interrupted (e.g., by power failure), one may
4980 * be written and the other not.  (2) Some of the counts are located in the
4981 * superblock rather than the cylinder group block. So, we focus our soft
4982 * updates implementation on protecting the bitmaps. When mounting a
4983 * filesystem, we recompute the auxiliary counts from the bitmaps.
4984 */
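
/*
 * Example of the ordering this guarantees: when a block is allocated, the
 * cylinder group bitmap marking it in-use must reach the disk before any
 * inode or indirect block pointer naming it; when a block is freed, all
 * such pointers must be cleared on disk before the bitmap may show it
 * free again.
 */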
4985
4986/*
4987 * Called just after updating the cylinder group block to allocate an inode.
4988 */
4989void
4990softdep_setup_inomapdep(bp, ip, newinum, mode)
4991	struct buf *bp;		/* buffer for cylgroup block with inode map */
4992	struct inode *ip;	/* inode related to allocation */
4993	ino_t newinum;		/* new inode number being allocated */
4994	int mode;
4995{
4996	struct inodedep *inodedep;
4997	struct bmsafemap *bmsafemap;
4998	struct jaddref *jaddref;
4999	struct mount *mp;
5000	struct fs *fs;
5001
5002	mp = UFSTOVFS(ip->i_ump);
5003	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5004	    ("softdep_setup_inomapdep called on non-softdep filesystem"));
5005	fs = ip->i_ump->um_fs;
5006	jaddref = NULL;
5007
5008	/*
5009	 * Allocate the journal reference add structure so that the bitmap
5010	 * can be dependent on it.
5011	 */
5012	if (MOUNTEDSUJ(mp)) {
5013		jaddref = newjaddref(ip, newinum, 0, 0, mode);
5014		jaddref->ja_state |= NEWBLOCK;
5015	}
5016
5017	/*
5018	 * Create a dependency for the newly allocated inode.
5019	 * Panic if it already exists as something is seriously wrong.
5020	 * Otherwise add it to the dependency list for the buffer holding
5021	 * the cylinder group map from which it was allocated.
5022	 *
5023	 * We have to preallocate a bmsafemap entry in case it is needed
5024	 * in bmsafemap_lookup since once we allocate the inodedep, we
5025	 * have to finish initializing it before we can FREE_LOCK().
5026	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
5027	 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
5028	 * creating the inodedep as it can be freed during the time
5029	 * that we FREE_LOCK() while allocating the inodedep. We must
5030	 * call workitem_alloc() before entering the locked section as
5031	 * it also acquires the lock and we must avoid trying to do so
5032	 * recursively.
5033	 */
5034	bmsafemap = malloc(sizeof(struct bmsafemap),
5035	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5036	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5037	ACQUIRE_LOCK(ip->i_ump);
5038	if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep)))
5039		panic("softdep_setup_inomapdep: dependency %p for new "
5040		    "inode already exists", inodedep);
5041	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
5042	if (jaddref) {
5043		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
5044		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
5045		    if_deps);
5046	} else {
5047		inodedep->id_state |= ONDEPLIST;
5048		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
5049	}
5050	inodedep->id_bmsafemap = bmsafemap;
5051	inodedep->id_state &= ~DEPCOMPLETE;
5052	FREE_LOCK(ip->i_ump);
5053}
5054
5055/*
5056 * Called just after updating the cylinder group block to
5057 * allocate block or fragment.
5058 */
5059void
5060softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
5061	struct buf *bp;		/* buffer for cylgroup block with block map */
5062	struct mount *mp;	/* filesystem doing allocation */
5063	ufs2_daddr_t newblkno;	/* number of newly allocated block */
5064	int frags;		/* Number of fragments. */
5065	int oldfrags;		/* Previous number of fragments for extend. */
5066{
5067	struct newblk *newblk;
5068	struct bmsafemap *bmsafemap;
5069	struct jnewblk *jnewblk;
5070	struct ufsmount *ump;
5071	struct fs *fs;
5072
5073	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5074	    ("softdep_setup_blkmapdep called on non-softdep filesystem"));
5075	ump = VFSTOUFS(mp);
5076	fs = ump->um_fs;
5077	jnewblk = NULL;
5078	/*
5079	 * Create a dependency for the newly allocated block.
5080	 * Add it to the dependency list for the buffer holding
5081	 * the cylinder group map from which it was allocated.
5082	 */
5083	if (MOUNTEDSUJ(mp)) {
5084		jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
5085		workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
5086		jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
5087		jnewblk->jn_state = ATTACHED;
5088		jnewblk->jn_blkno = newblkno;
5089		jnewblk->jn_frags = frags;
5090		jnewblk->jn_oldfrags = oldfrags;
5091#ifdef SUJ_DEBUG
5092		{
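			/*
			 * Verify that each fragment being journaled as
			 * newly allocated is no longer marked free in the
			 * on-disk copy of the cylinder group bitmap.
			 */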
5093			struct cg *cgp;
5094			uint8_t *blksfree;
5095			long bno;
5096			int i;
5097
5098			cgp = (struct cg *)bp->b_data;
5099			blksfree = cg_blksfree(cgp);
5100			bno = dtogd(fs, jnewblk->jn_blkno);
5101			for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
5102			    i++) {
5103				if (isset(blksfree, bno + i))
5104					panic("softdep_setup_blkmapdep: "
5105					    "free fragment %d from %d-%d "
5106					    "state 0x%X dep %p", i,
5107					    jnewblk->jn_oldfrags,
5108					    jnewblk->jn_frags,
5109					    jnewblk->jn_state,
5110					    jnewblk->jn_dep);
5111			}
5112		}
5113#endif
5114	}
5115
5116	CTR3(KTR_SUJ,
5117	    "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
5118	    newblkno, frags, oldfrags);
5119	ACQUIRE_LOCK(ump);
5120	if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
5121		panic("softdep_setup_blkmapdep: found block");
5122	bmsafemap = bmsafemap_lookup(mp, bp,
5123	    dtog(fs, newblkno), NULL);
5124	if (jnewblk) {
5125		jnewblk->jn_dep = (struct worklist *)newblk;
5126		LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
5127	} else {
5128		newblk->nb_state |= ONDEPLIST;
5129		LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
5130	}
5131	newblk->nb_bmsafemap = bmsafemap;
5132	newblk->nb_jnewblk = jnewblk;
5133	FREE_LOCK(ump);
5134}
5135
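/*
 * Hash a cylinder group number into the per-mount bmsafemap table.
 */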
5136#define	BMSAFEMAP_HASH(ump, cg) \
5137      (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size])
5138
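/*
 * Search a bmsafemap hash chain for the entry matching cylinder group
 * "cg".  Returns 1 and sets *bmsafemapp when found; otherwise returns 0.
 */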
5139static int
5140bmsafemap_find(bmsafemaphd, cg, bmsafemapp)
5141	struct bmsafemap_hashhead *bmsafemaphd;
5142	int cg;
5143	struct bmsafemap **bmsafemapp;
5144{
5145	struct bmsafemap *bmsafemap;
5146
5147	LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
5148		if (bmsafemap->sm_cg == cg)
5149			break;
5150	if (bmsafemap) {
5151		*bmsafemapp = bmsafemap;
5152		return (1);
5153	}
5154	*bmsafemapp = NULL;
5155
5156	return (0);
5157}
5158
5159/*
5160 * Find the bmsafemap associated with a cylinder group buffer.
5161 * If none exists, create one. The buffer must be locked when
5162 * this routine is called, and it must be called with
5163 * the softdep lock held. To avoid giving up the lock while
5164 * allocating a new bmsafemap, a preallocated bmsafemap may be
5165 * provided. If it is provided but not needed, it is freed.
5166 */
5167static struct bmsafemap *
5168bmsafemap_lookup(mp, bp, cg, newbmsafemap)
5169	struct mount *mp;
5170	struct buf *bp;
5171	int cg;
5172	struct bmsafemap *newbmsafemap;
5173{
5174	struct bmsafemap_hashhead *bmsafemaphd;
5175	struct bmsafemap *bmsafemap, *collision;
5176	struct worklist *wk;
5177	struct ufsmount *ump;
5178
5179	ump = VFSTOUFS(mp);
5180	LOCK_OWNED(ump);
5181	KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5182	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5183		if (wk->wk_type == D_BMSAFEMAP) {
5184			if (newbmsafemap)
5185				WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5186			return (WK_BMSAFEMAP(wk));
5187		}
5188	}
5189	bmsafemaphd = BMSAFEMAP_HASH(ump, cg);
5190	if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) {
5191		if (newbmsafemap)
5192			WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5193		return (bmsafemap);
5194	}
5195	if (newbmsafemap) {
5196		bmsafemap = newbmsafemap;
5197	} else {
5198		FREE_LOCK(ump);
5199		bmsafemap = malloc(sizeof(struct bmsafemap),
5200			M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5201		workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5202		ACQUIRE_LOCK(ump);
5203	}
5204	bmsafemap->sm_buf = bp;
5205	LIST_INIT(&bmsafemap->sm_inodedephd);
5206	LIST_INIT(&bmsafemap->sm_inodedepwr);
5207	LIST_INIT(&bmsafemap->sm_newblkhd);
5208	LIST_INIT(&bmsafemap->sm_newblkwr);
5209	LIST_INIT(&bmsafemap->sm_jaddrefhd);
5210	LIST_INIT(&bmsafemap->sm_jnewblkhd);
5211	LIST_INIT(&bmsafemap->sm_freehd);
5212	LIST_INIT(&bmsafemap->sm_freewr);
5213	if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) {
5214		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5215		return (collision);
5216	}
5217	bmsafemap->sm_cg = cg;
5218	LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5219	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
5220	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5221	return (bmsafemap);
5222}
5223
5224/*
5225 * Direct block allocation dependencies.
5226 *
5227 * When a new block is allocated, the corresponding disk locations must be
5228 * initialized (with zeros or new data) before the on-disk inode points to
5229 * them.  Also, the freemap from which the block was allocated must be
5230 * updated (on disk) before the inode's pointer. These two dependencies are
5231 * independent of each other and are needed for all file blocks and indirect
5232 * blocks that are pointed to directly by the inode.  Just before the
5233 * "in-core" version of the inode is updated with a newly allocated block
5234 * number, a procedure (below) is called to setup allocation dependency
5235 * structures.  These structures are removed when the corresponding
5236 * dependencies are satisfied or when the block allocation becomes obsolete
5237 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5238 * fragment that gets upgraded).  All of these cases are handled in
5239 * procedures described later.
5240 *
5241 * When a file extension causes a fragment to be upgraded, either to a larger
5242 * fragment or to a full block, the on-disk location may change (if the
5243 * previous fragment could not simply be extended). In this case, the old
5244 * fragment must be de-allocated, but not until after the inode's pointer has
5245 * been updated. In most cases, this is handled by later procedures, which
5246 * will construct a "freefrag" structure to be added to the workitem queue
5247 * when the inode update is complete (or obsolete).  The main exception to
5248 * this is when an allocation occurs while a pending allocation dependency
5249 * (for the same block pointer) remains.  This case is handled in the main
5250 * allocation dependency setup procedure by immediately freeing the
5251 * unreferenced fragments.
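 *
 * For example, upgrading a fragment to a larger fragment at a new disk
 * address leaves the old fragment to be de-allocated only after the
 * inode's updated pointer reaches the disk; the "freefrag" structure
 * described above carries out that deferred free.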
5252 */
5253void
5254softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5255	struct inode *ip;	/* inode to which block is being added */
5256	ufs_lbn_t off;		/* block pointer within inode */
5257	ufs2_daddr_t newblkno;	/* disk block number being added */
5258	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
5259	long newsize;		/* size of new block */
5260	long oldsize;		/* size of old block */
5261	struct buf *bp;		/* bp for allocated block */
5262{
5263	struct allocdirect *adp, *oldadp;
5264	struct allocdirectlst *adphead;
5265	struct freefrag *freefrag;
5266	struct inodedep *inodedep;
5267	struct pagedep *pagedep;
5268	struct jnewblk *jnewblk;
5269	struct newblk *newblk;
5270	struct mount *mp;
5271	ufs_lbn_t lbn;
5272
5273	lbn = bp->b_lblkno;
5274	mp = UFSTOVFS(ip->i_ump);
5275	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5276	    ("softdep_setup_allocdirect called on non-softdep filesystem"));
5277	if (oldblkno && oldblkno != newblkno)
5278		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5279	else
5280		freefrag = NULL;
5281
5282	CTR6(KTR_SUJ,
5283	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5284	    "off %jd newsize %ld oldsize %ld",
5285	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5286	ACQUIRE_LOCK(ip->i_ump);
5287	if (off >= NDADDR) {
5288		if (lbn > 0)
5289			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5290			    lbn, off);
5291		/* allocating an indirect block */
5292		if (oldblkno != 0)
5293			panic("softdep_setup_allocdirect: non-zero indir");
5294	} else {
5295		if (off != lbn)
5296			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5297			    lbn, off);
5298		/*
5299		 * Allocating a direct block.
5300		 *
5301		 * If we are allocating a directory block, then we must
5302		 * allocate an associated pagedep to track additions and
5303		 * deletions.
5304		 */
5305		if ((ip->i_mode & IFMT) == IFDIR)
5306			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5307			    &pagedep);
5308	}
5309	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5310		panic("softdep_setup_allocdirect: lost block");
5311	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5312	    ("softdep_setup_allocdirect: newblk already initialized"));
5313	/*
5314	 * Convert the newblk to an allocdirect.
5315	 */
5316	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5317	adp = (struct allocdirect *)newblk;
5318	newblk->nb_freefrag = freefrag;
5319	adp->ad_offset = off;
5320	adp->ad_oldblkno = oldblkno;
5321	adp->ad_newsize = newsize;
5322	adp->ad_oldsize = oldsize;
5323
5324	/*
5325	 * Finish initializing the journal.
5326	 */
5327	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5328		jnewblk->jn_ino = ip->i_number;
5329		jnewblk->jn_lbn = lbn;
5330		add_to_journal(&jnewblk->jn_list);
5331	}
5332	if (freefrag && freefrag->ff_jdep != NULL &&
5333	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5334		add_to_journal(freefrag->ff_jdep);
5335	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5336	adp->ad_inodedep = inodedep;
5337
5338	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5339	/*
5340	 * The list of allocdirects must be kept in sorted and ascending
5341	 * order so that the rollback routines can quickly determine the
5342	 * first uncommitted block (the size of the file stored on disk
5343	 * ends at the end of the lowest committed fragment, or if there
5344	 * are no fragments, at the end of the highest committed block).
5345	 * Since files generally grow, the typical case is that the new
5346	 * block is to be added at the end of the list. We speed this
5347	 * special case by checking against the last allocdirect in the
5348	 * list before laboriously traversing the list looking for the
5349	 * insertion point.
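	 * For illustration: if the list already holds allocdirects for
	 * offsets 1, 4, and 7, adding offset 9 takes the fast path at
	 * the tail, adding offset 7 again is inserted at the tail and
	 * merged by allocdirect_merge(), and adding offset 5 is placed
	 * before 7 by the scan below.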
5350	 */
5351	adphead = &inodedep->id_newinoupdt;
5352	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5353	if (oldadp == NULL || oldadp->ad_offset <= off) {
5354		/* insert at end of list */
5355		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5356		if (oldadp != NULL && oldadp->ad_offset == off)
5357			allocdirect_merge(adphead, adp, oldadp);
5358		FREE_LOCK(ip->i_ump);
5359		return;
5360	}
5361	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5362		if (oldadp->ad_offset >= off)
5363			break;
5364	}
5365	if (oldadp == NULL)
5366		panic("softdep_setup_allocdirect: lost entry");
5367	/* insert in middle of list */
5368	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5369	if (oldadp->ad_offset == off)
5370		allocdirect_merge(adphead, adp, oldadp);
5371
5372	FREE_LOCK(ip->i_ump);
5373}
5374
5375/*
5376 * Merge a newer and older journal record to be stored either in a
5377 * newblock or freefrag.  This handles aggregating journal records for
5378 * fragment allocation into a second record as well as replacing a
5379 * journal free with an aborted journal allocation.  A segment for the
5380 * oldest record will be placed on wkhd if it has been written.  If not
5381 * the segment for the newer record will suffice.
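 * For example, if a 2-fragment allocation in a block is later extended
 * to 4 fragments of the same block, the newer jnewblk record describes
 * the complete set of fragments, so the older record can be canceled
 * and freed while the newer record carries the dependency forward.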
5382 */
5383static struct worklist *
5384jnewblk_merge(new, old, wkhd)
5385	struct worklist *new;
5386	struct worklist *old;
5387	struct workhead *wkhd;
5388{
5389	struct jnewblk *njnewblk;
5390	struct jnewblk *jnewblk;
5391
5392	/* Handle NULLs to simplify callers. */
5393	if (new == NULL)
5394		return (old);
5395	if (old == NULL)
5396		return (new);
5397	/* Replace a jfreefrag with a jnewblk. */
5398	if (new->wk_type == D_JFREEFRAG) {
5399		if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5400			panic("jnewblk_merge: blkno mismatch: %p, %p",
5401			    old, new);
5402		cancel_jfreefrag(WK_JFREEFRAG(new));
5403		return (old);
5404	}
5405	if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5406		panic("jnewblk_merge: Bad type: old %d new %d\n",
5407		    old->wk_type, new->wk_type);
5408	/*
5409	 * Handle merging of two jnewblk records that describe
5410	 * different sets of fragments in the same block.
5411	 */
5412	jnewblk = WK_JNEWBLK(old);
5413	njnewblk = WK_JNEWBLK(new);
5414	if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5415		panic("jnewblk_merge: Merging disparate blocks.");
5416	/*
5417	 * The record may be rolled back in the cg.
5418	 */
5419	if (jnewblk->jn_state & UNDONE) {
5420		jnewblk->jn_state &= ~UNDONE;
5421		njnewblk->jn_state |= UNDONE;
5422		njnewblk->jn_state &= ~ATTACHED;
5423	}
5424	/*
5425	 * We modify the newer addref and free the older so that if neither
5426	 * has been written the most up-to-date copy will be on disk.  If
5427	 * both have been written but rolled back we only temporarily need
5428	 * one of them to fix the bits when the cg write completes.
5429	 */
5430	jnewblk->jn_state |= ATTACHED | COMPLETE;
5431	njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5432	cancel_jnewblk(jnewblk, wkhd);
5433	WORKLIST_REMOVE(&jnewblk->jn_list);
5434	free_jnewblk(jnewblk);
5435	return (new);
5436}
5437
5438/*
5439 * Replace an old allocdirect dependency with a newer one.
5440 * This routine must be called with splbio interrupts blocked.
5441 */
5442static void
5443allocdirect_merge(adphead, newadp, oldadp)
5444	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
5445	struct allocdirect *newadp;	/* allocdirect being added */
5446	struct allocdirect *oldadp;	/* existing allocdirect being checked */
5447{
5448	struct worklist *wk;
5449	struct freefrag *freefrag;
5450
5451	freefrag = NULL;
5452	LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp));
5453	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5454	    newadp->ad_oldsize != oldadp->ad_newsize ||
5455	    newadp->ad_offset >= NDADDR)
5456		panic("%s %jd != new %jd || old size %ld != new %ld",
5457		    "allocdirect_merge: old blkno",
5458		    (intmax_t)newadp->ad_oldblkno,
5459		    (intmax_t)oldadp->ad_newblkno,
5460		    newadp->ad_oldsize, oldadp->ad_newsize);
5461	newadp->ad_oldblkno = oldadp->ad_oldblkno;
5462	newadp->ad_oldsize = oldadp->ad_oldsize;
5463	/*
5464	 * If the old dependency had a fragment to free or had never
5465	 * previously had a block allocated, then the new dependency
5466	 * can immediately post its freefrag and adopt the old freefrag.
5467	 * This action is done by swapping the freefrag dependencies.
5468	 * The new dependency gains the old one's freefrag, and the
5469	 * old one gets the new one and then immediately puts it on
5470	 * the worklist when it is freed by free_newblk. It is
5471	 * not possible to do this swap when the old dependency had a
5472	 * non-zero size but no previous fragment to free. This condition
5473	 * arises when the new block is an extension of the old block.
5474	 * Here, the first part of the fragment allocated to the new
5475	 * dependency is part of the block currently claimed on disk by
5476	 * the old dependency, so cannot legitimately be freed until the
5477	 * conditions for the new dependency are fulfilled.
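	 * For illustration: if the old dependency replaced fragment F0
	 * with F1 (so its freefrag releases F0) and the new dependency
	 * replaces F1 with F2 (so its freefrag releases F1), the swap
	 * leaves the new dependency releasing F0 once it completes,
	 * while F1 can be queued for release as soon as the old
	 * dependency is discarded by free_newblk.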
5478	 */
5479	freefrag = newadp->ad_freefrag;
5480	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5481		newadp->ad_freefrag = oldadp->ad_freefrag;
5482		oldadp->ad_freefrag = freefrag;
5483	}
5484	/*
5485	 * If we are tracking a new directory-block allocation,
5486	 * move it from the old allocdirect to the new allocdirect.
5487	 */
5488	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5489		WORKLIST_REMOVE(wk);
5490		if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5491			panic("allocdirect_merge: extra newdirblk");
5492		WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5493	}
5494	TAILQ_REMOVE(adphead, oldadp, ad_next);
5495	/*
5496	 * We need to move any journal dependencies over to the freefrag
5497	 * that releases this block if it exists.  Otherwise we are
5498	 * extending an existing block and we'll wait until that is
5499	 * complete to release the journal space and extend the
5500	 * new journal to cover this old space as well.
5501	 */
5502	if (freefrag == NULL) {
5503		if (oldadp->ad_newblkno != newadp->ad_newblkno)
5504			panic("allocdirect_merge: %jd != %jd",
5505			    oldadp->ad_newblkno, newadp->ad_newblkno);
5506		newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5507		    jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5508		    &oldadp->ad_block.nb_jnewblk->jn_list,
5509		    &newadp->ad_block.nb_jwork);
5510		oldadp->ad_block.nb_jnewblk = NULL;
5511		cancel_newblk(&oldadp->ad_block, NULL,
5512		    &newadp->ad_block.nb_jwork);
5513	} else {
5514		wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5515		    &freefrag->ff_list, &freefrag->ff_jwork);
5516		freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5517		    &freefrag->ff_jwork);
5518	}
5519	free_newblk(&oldadp->ad_block);
5520}
5521
5522/*
5523 * Allocate a jfreefrag structure to journal a single block free.
5524 */
5525static struct jfreefrag *
5526newjfreefrag(freefrag, ip, blkno, size, lbn)
5527	struct freefrag *freefrag;
5528	struct inode *ip;
5529	ufs2_daddr_t blkno;
5530	long size;
5531	ufs_lbn_t lbn;
5532{
5533	struct jfreefrag *jfreefrag;
5534	struct fs *fs;
5535
5536	fs = ip->i_fs;
5537	jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5538	    M_SOFTDEP_FLAGS);
5539	workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump));
5540	jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5541	jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5542	jfreefrag->fr_ino = ip->i_number;
5543	jfreefrag->fr_lbn = lbn;
5544	jfreefrag->fr_blkno = blkno;
5545	jfreefrag->fr_frags = numfrags(fs, size);
5546	jfreefrag->fr_freefrag = freefrag;
5547
5548	return (jfreefrag);
5549}
5550
5551/*
5552 * Allocate a new freefrag structure.
5553 */
5554static struct freefrag *
5555newfreefrag(ip, blkno, size, lbn)
5556	struct inode *ip;
5557	ufs2_daddr_t blkno;
5558	long size;
5559	ufs_lbn_t lbn;
5560{
5561	struct freefrag *freefrag;
5562	struct fs *fs;
5563
5564	CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5565	    ip->i_number, blkno, size, lbn);
5566	fs = ip->i_fs;
5567	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5568		panic("newfreefrag: frag size");
5569	freefrag = malloc(sizeof(struct freefrag),
5570	    M_FREEFRAG, M_SOFTDEP_FLAGS);
5571	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
5572	freefrag->ff_state = ATTACHED;
5573	LIST_INIT(&freefrag->ff_jwork);
5574	freefrag->ff_inum = ip->i_number;
5575	freefrag->ff_vtype = ITOV(ip)->v_type;
5576	freefrag->ff_blkno = blkno;
5577	freefrag->ff_fragsize = size;
5578
5579	if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) {
5580		freefrag->ff_jdep = (struct worklist *)
5581		    newjfreefrag(freefrag, ip, blkno, size, lbn);
5582	} else {
5583		freefrag->ff_state |= DEPCOMPLETE;
5584		freefrag->ff_jdep = NULL;
5585	}
5586
5587	return (freefrag);
5588}
5589
5590/*
5591 * This workitem de-allocates fragments that were replaced during
5592 * file block allocation.
5593 */
5594static void
5595handle_workitem_freefrag(freefrag)
5596	struct freefrag *freefrag;
5597{
5598	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5599	struct workhead wkhd;
5600
5601	CTR3(KTR_SUJ,
5602	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5603	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5604	/*
5605	 * It would be illegal to add new completion items to the
5606	 * freefrag after it was scheduled to be done, so it must be
5607	 * safe to modify the list head here.
5608	 */
5609	LIST_INIT(&wkhd);
5610	ACQUIRE_LOCK(ump);
5611	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5612	/*
5613	 * If the journal has not been written we must cancel it here.
5614	 */
5615	if (freefrag->ff_jdep) {
5616		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5617			panic("handle_workitem_freefrag: Unexpected type %d\n",
5618			    freefrag->ff_jdep->wk_type);
5619		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5620	}
5621	FREE_LOCK(ump);
5622	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5623	   freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5624	ACQUIRE_LOCK(ump);
5625	WORKITEM_FREE(freefrag, D_FREEFRAG);
5626	FREE_LOCK(ump);
5627}
5628
5629/*
5630 * Set up a dependency structure for an external attributes data block.
5631 * This routine follows much of the structure of softdep_setup_allocdirect.
5632 * See the description of softdep_setup_allocdirect above for details.
5633 */
5634void
5635softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5636	struct inode *ip;
5637	ufs_lbn_t off;
5638	ufs2_daddr_t newblkno;
5639	ufs2_daddr_t oldblkno;
5640	long newsize;
5641	long oldsize;
5642	struct buf *bp;
5643{
5644	struct allocdirect *adp, *oldadp;
5645	struct allocdirectlst *adphead;
5646	struct freefrag *freefrag;
5647	struct inodedep *inodedep;
5648	struct jnewblk *jnewblk;
5649	struct newblk *newblk;
5650	struct mount *mp;
5651	ufs_lbn_t lbn;
5652
5653	mp = UFSTOVFS(ip->i_ump);
5654	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5655	    ("softdep_setup_allocext called on non-softdep filesystem"));
5656	KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld >= NXADDR",
5657		    (long long)off));
5658
5659	lbn = bp->b_lblkno;
5660	if (oldblkno && oldblkno != newblkno)
5661		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5662	else
5663		freefrag = NULL;
5664
5665	ACQUIRE_LOCK(ip->i_ump);
5666	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5667		panic("softdep_setup_allocext: lost block");
5668	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5669	    ("softdep_setup_allocext: newblk already initialized"));
5670	/*
5671	 * Convert the newblk to an allocdirect.
5672	 */
5673	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5674	adp = (struct allocdirect *)newblk;
5675	newblk->nb_freefrag = freefrag;
5676	adp->ad_offset = off;
5677	adp->ad_oldblkno = oldblkno;
5678	adp->ad_newsize = newsize;
5679	adp->ad_oldsize = oldsize;
5680	adp->ad_state |=  EXTDATA;
5681
5682	/*
5683	 * Finish initializing the journal.
5684	 */
5685	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5686		jnewblk->jn_ino = ip->i_number;
5687		jnewblk->jn_lbn = lbn;
5688		add_to_journal(&jnewblk->jn_list);
5689	}
5690	if (freefrag && freefrag->ff_jdep != NULL &&
5691	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5692		add_to_journal(freefrag->ff_jdep);
5693	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5694	adp->ad_inodedep = inodedep;
5695
5696	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5697	/*
5698	 * The list of allocdirects must be kept sorted in ascending
5699	 * order so that the rollback routines can quickly determine the
5700	 * first uncommitted block (the size of the file stored on disk
5701	 * ends at the end of the lowest committed fragment, or if there
5702	 * are no fragments, at the end of the highest committed block).
5703	 * Since files generally grow, the typical case is that the new
5704	 * block is to be added at the end of the list. We speed this
5705	 * special case by checking against the last allocdirect in the
5706	 * list before laboriously traversing the list looking for the
5707	 * insertion point.
5708	 */
5709	adphead = &inodedep->id_newextupdt;
5710	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5711	if (oldadp == NULL || oldadp->ad_offset <= off) {
5712		/* insert at end of list */
5713		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5714		if (oldadp != NULL && oldadp->ad_offset == off)
5715			allocdirect_merge(adphead, adp, oldadp);
5716		FREE_LOCK(ip->i_ump);
5717		return;
5718	}
5719	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5720		if (oldadp->ad_offset >= off)
5721			break;
5722	}
5723	if (oldadp == NULL)
5724		panic("softdep_setup_allocext: lost entry");
5725	/* insert in middle of list */
5726	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5727	if (oldadp->ad_offset == off)
5728		allocdirect_merge(adphead, adp, oldadp);
5729	FREE_LOCK(ip->i_ump);
5730}
5731
5732/*
5733 * Indirect block allocation dependencies.
5734 *
5735 * The same dependencies that exist for a direct block also exist when
5736 * a new block is allocated and pointed to by an entry in a block of
5737 * indirect pointers. The undo/redo states described above are also
5738 * used here. Because an indirect block contains many pointers that
5739 * may have dependencies, a second copy of the entire in-memory indirect
5740 * block is kept. The buffer cache copy is always completely up-to-date.
5741 * The second copy, which is used only as a source for disk writes,
5742 * contains only the safe pointers (i.e., those that have no remaining
5743 * update dependencies). The second copy is freed when all pointers
5744 * are safe. The cache is not allowed to replace indirect blocks with
5745 * pending update dependencies. If a buffer containing an indirect
5746 * block with dependencies is written, these routines will mark it
5747 * dirty again. It can only be successfully written once all the
5748 * dependencies are removed. The ffs_fsync routine in conjunction with
5749 * softdep_sync_metadata work together to get all the dependencies
5750 * removed so that a file can be successfully written to disk. Three
5751 * procedures are used when setting up indirect block pointer
5752 * dependencies. The division is necessary because of the organization
5753 * of the "balloc" routine and because of the distinction between file
5754 * pages and file metadata blocks.
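 * For example, if an indirect block holds one pointer whose new data
 * block is already fully committed and another whose dependencies are
 * still pending, the copy written to disk contains the committed
 * pointer together with the rolled-back (previous, possibly zero)
 * value of the pending one, while the buffer cache copy keeps both
 * up-to-date values.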
5755 */
5756
5757/*
5758 * Allocate a new allocindir structure.
5759 */
5760static struct allocindir *
5761newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
5762	struct inode *ip;	/* inode for file being extended */
5763	int ptrno;		/* offset of pointer in indirect block */
5764	ufs2_daddr_t newblkno;	/* disk block number being added */
5765	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5766	ufs_lbn_t lbn;
5767{
5768	struct newblk *newblk;
5769	struct allocindir *aip;
5770	struct freefrag *freefrag;
5771	struct jnewblk *jnewblk;
5772
5773	if (oldblkno)
5774		freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn);
5775	else
5776		freefrag = NULL;
5777	ACQUIRE_LOCK(ip->i_ump);
5778	if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0)
5779		panic("new_allocindir: lost block");
5780		panic("newallocindir: lost block");
5781	    ("newallocindir: newblk already initialized"));
5782	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
5783	newblk->nb_freefrag = freefrag;
5784	aip = (struct allocindir *)newblk;
5785	aip->ai_offset = ptrno;
5786	aip->ai_oldblkno = oldblkno;
5787	aip->ai_lbn = lbn;
5788	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5789		jnewblk->jn_ino = ip->i_number;
5790		jnewblk->jn_lbn = lbn;
5791		add_to_journal(&jnewblk->jn_list);
5792	}
5793	if (freefrag && freefrag->ff_jdep != NULL &&
5794	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5795		add_to_journal(freefrag->ff_jdep);
5796	return (aip);
5797}
5798
5799/*
5800 * Called just before setting an indirect block pointer
5801 * to a newly allocated file page.
5802 */
5803void
5804softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
5805	struct inode *ip;	/* inode for file being extended */
5806	ufs_lbn_t lbn;		/* allocated block number within file */
5807	struct buf *bp;		/* buffer with indirect blk referencing page */
5808	int ptrno;		/* offset of pointer in indirect block */
5809	ufs2_daddr_t newblkno;	/* disk block number being added */
5810	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5811	struct buf *nbp;	/* buffer holding allocated page */
5812{
5813	struct inodedep *inodedep;
5814	struct freefrag *freefrag;
5815	struct allocindir *aip;
5816	struct pagedep *pagedep;
5817	struct mount *mp;
5818
5819	mp = UFSTOVFS(ip->i_ump);
5820	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5821	    ("softdep_setup_allocindir_page called on non-softdep filesystem"));
5822	KASSERT(lbn == nbp->b_lblkno,
5823	    ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
5824	    lbn, nbp->b_lblkno));
5825	CTR4(KTR_SUJ,
5826	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
5827	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
5828	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
5829	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
5830	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5831	/*
5832	 * If we are allocating a directory page, then we must
5833	 * allocate an associated pagedep to track additions and
5834	 * deletions.
5835	 */
5836	if ((ip->i_mode & IFMT) == IFDIR)
5837		pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
5838	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5839	freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
5840	FREE_LOCK(ip->i_ump);
5841	if (freefrag)
5842		handle_workitem_freefrag(freefrag);
5843}
5844
5845/*
5846 * Called just before setting an indirect block pointer to a
5847 * newly allocated indirect block.
5848 */
5849void
5850softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
5851	struct buf *nbp;	/* newly allocated indirect block */
5852	struct inode *ip;	/* inode for file being extended */
5853	struct buf *bp;		/* indirect block referencing allocated block */
5854	int ptrno;		/* offset of pointer in indirect block */
5855	ufs2_daddr_t newblkno;	/* disk block number being added */
5856{
5857	struct inodedep *inodedep;
5858	struct allocindir *aip;
5859	ufs_lbn_t lbn;
5860
5861	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
5862	    ("softdep_setup_allocindir_meta called on non-softdep filesystem"));
5863	CTR3(KTR_SUJ,
5864	    "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
5865	    ip->i_number, newblkno, ptrno);
5866	lbn = nbp->b_lblkno;
5867	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
5868	aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
5869	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
5870	    &inodedep);
5871	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5872	if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
5873		panic("softdep_setup_allocindir_meta: Block already existed");
5874	FREE_LOCK(ip->i_ump);
5875}
5876
5877static void
5878indirdep_complete(indirdep)
5879	struct indirdep *indirdep;
5880{
5881	struct allocindir *aip;
5882
5883	LIST_REMOVE(indirdep, ir_next);
5884	indirdep->ir_state |= DEPCOMPLETE;
5885
5886	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
5887		LIST_REMOVE(aip, ai_next);
5888		free_newblk(&aip->ai_block);
5889	}
5890	/*
5891	 * If this indirdep is not attached to a buf it was simply waiting
5892	 * on completion to clear completehd.  free_indirdep() asserts
5893	 * that nothing is dangling.
5894	 */
5895	if ((indirdep->ir_state & ONWORKLIST) == 0)
5896		free_indirdep(indirdep);
5897}
5898
5899static struct indirdep *
5900indirdep_lookup(mp, ip, bp)
5901	struct mount *mp;
5902	struct inode *ip;
5903	struct buf *bp;
5904{
5905	struct indirdep *indirdep, *newindirdep;
5906	struct newblk *newblk;
5907	struct ufsmount *ump;
5908	struct worklist *wk;
5909	struct fs *fs;
5910	ufs2_daddr_t blkno;
5911
5912	ump = VFSTOUFS(mp);
5913	LOCK_OWNED(ump);
5914	indirdep = NULL;
5915	newindirdep = NULL;
5916	fs = ip->i_fs;
5917	for (;;) {
5918		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5919			if (wk->wk_type != D_INDIRDEP)
5920				continue;
5921			indirdep = WK_INDIRDEP(wk);
5922			break;
5923		}
5924		/* Found on the buffer worklist, no new structure to free. */
5925		if (indirdep != NULL && newindirdep == NULL)
5926			return (indirdep);
5927		if (indirdep != NULL && newindirdep != NULL)
5928			panic("indirdep_lookup: simultaneous create");
5929		/* None found on the buffer and a new structure is ready. */
5930		if (indirdep == NULL && newindirdep != NULL)
5931			break;
5932		/* None found and no new structure available. */
5933		FREE_LOCK(ump);
5934		newindirdep = malloc(sizeof(struct indirdep),
5935		    M_INDIRDEP, M_SOFTDEP_FLAGS);
5936		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
5937		newindirdep->ir_state = ATTACHED;
5938		if (ip->i_ump->um_fstype == UFS1)
5939			newindirdep->ir_state |= UFS1FMT;
5940		TAILQ_INIT(&newindirdep->ir_trunc);
5941		newindirdep->ir_saveddata = NULL;
5942		LIST_INIT(&newindirdep->ir_deplisthd);
5943		LIST_INIT(&newindirdep->ir_donehd);
5944		LIST_INIT(&newindirdep->ir_writehd);
5945		LIST_INIT(&newindirdep->ir_completehd);
5946		if (bp->b_blkno == bp->b_lblkno) {
5947			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
5948			    NULL, NULL);
5949			bp->b_blkno = blkno;
5950		}
5951		newindirdep->ir_freeblks = NULL;
5952		newindirdep->ir_savebp =
5953		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
5954		newindirdep->ir_bp = bp;
5955		BUF_KERNPROC(newindirdep->ir_savebp);
5956		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
5957		ACQUIRE_LOCK(ump);
5958	}
5959	indirdep = newindirdep;
5960	WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
5961	/*
5962	 * If the block is not yet allocated we don't set DEPCOMPLETE so
5963	 * that we don't free dependencies until the pointers are valid.
5964	 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
5965	 * than using the hash.
5966	 */
5967	if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
5968		LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
5969	else
5970		indirdep->ir_state |= DEPCOMPLETE;
5971	return (indirdep);
5972}
5973
5974/*
5975 * Called to finish the allocation of the "aip" allocated
5976 * by one of the two routines above.
5977 */
5978static struct freefrag *
5979setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
5980	struct buf *bp;		/* in-memory copy of the indirect block */
5981	struct inode *ip;	/* inode for file being extended */
5982	struct inodedep *inodedep; /* Inodedep for ip */
5983	struct allocindir *aip;	/* allocindir allocated by the above routines */
5984	ufs_lbn_t lbn;		/* Logical block number for this block. */
5985{
5986	struct fs *fs;
5987	struct indirdep *indirdep;
5988	struct allocindir *oldaip;
5989	struct freefrag *freefrag;
5990	struct mount *mp;
5991
5992	LOCK_OWNED(ip->i_ump);
5993	mp = UFSTOVFS(ip->i_ump);
5994	fs = ip->i_fs;
5995	if (bp->b_lblkno >= 0)
5996		panic("setup_allocindir_phase2: not indir blk");
5997	KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
5998	    ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
5999	indirdep = indirdep_lookup(mp, ip, bp);
6000	KASSERT(indirdep->ir_savebp != NULL,
6001	    ("setup_allocindir_phase2 NULL ir_savebp"));
6002	aip->ai_indirdep = indirdep;
6003	/*
6004	 * Check for an unwritten dependency for this indirect offset.  If
6005	 * there is, merge the old dependency into the new one.  This happens
6006	 * as a result of reallocblk only.
6007	 */
6008	freefrag = NULL;
6009	if (aip->ai_oldblkno != 0) {
6010		LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
6011			if (oldaip->ai_offset == aip->ai_offset) {
6012				freefrag = allocindir_merge(aip, oldaip);
6013				goto done;
6014			}
6015		}
6016		LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
6017			if (oldaip->ai_offset == aip->ai_offset) {
6018				freefrag = allocindir_merge(aip, oldaip);
6019				goto done;
6020			}
6021		}
6022	}
6023done:
6024	LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
6025	return (freefrag);
6026}
6027
6028/*
6029 * Merge two allocindirs which refer to the same block.  Move newblock
6030 * dependencies and setup the freefrags appropriately.
6031 */
6032static struct freefrag *
6033allocindir_merge(aip, oldaip)
6034	struct allocindir *aip;
6035	struct allocindir *oldaip;
6036{
6037	struct freefrag *freefrag;
6038	struct worklist *wk;
6039
6040	if (oldaip->ai_newblkno != aip->ai_oldblkno)
6041		panic("allocindir_merge: blkno");
6042	aip->ai_oldblkno = oldaip->ai_oldblkno;
6043	freefrag = aip->ai_freefrag;
6044	aip->ai_freefrag = oldaip->ai_freefrag;
6045	oldaip->ai_freefrag = NULL;
6046	KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag"));
6047	/*
6048	 * If we are tracking a new directory-block allocation,
6049	 * move it from the old allocindir to the new allocindir.
6050	 */
6051	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
6052		WORKLIST_REMOVE(wk);
6053		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
6054			panic("allocindir_merge: extra newdirblk");
6055		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
6056	}
6057	/*
6058	 * We can skip journaling for this freefrag and just complete
6059	 * any pending journal work for the allocindir that is being
6060	 * removed after the freefrag completes.
6061	 */
6062	if (freefrag->ff_jdep)
6063		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
6064	LIST_REMOVE(oldaip, ai_next);
6065	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
6066	    &freefrag->ff_list, &freefrag->ff_jwork);
6067	free_newblk(&oldaip->ai_block);
6068
6069	return (freefrag);
6070}
6071
6072static inline void
6073setup_freedirect(freeblks, ip, i, needj)
6074	struct freeblks *freeblks;
6075	struct inode *ip;
6076	int i;
6077	int needj;
6078{
6079	ufs2_daddr_t blkno;
6080	int frags;
6081
6082	blkno = DIP(ip, i_db[i]);
6083	if (blkno == 0)
6084		return;
6085	DIP_SET(ip, i_db[i], 0);
6086	frags = sblksize(ip->i_fs, ip->i_size, i);
6087	frags = numfrags(ip->i_fs, frags);
6088	newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj);
6089}
6090
6091static inline void
6092setup_freeext(freeblks, ip, i, needj)
6093	struct freeblks *freeblks;
6094	struct inode *ip;
6095	int i;
6096	int needj;
6097{
6098	ufs2_daddr_t blkno;
6099	int frags;
6100
6101	blkno = ip->i_din2->di_extb[i];
6102	if (blkno == 0)
6103		return;
6104	ip->i_din2->di_extb[i] = 0;
6105	frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i);
6106	frags = numfrags(ip->i_fs, frags);
6107	newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
6108}
6109
6110static inline void
6111setup_freeindir(freeblks, ip, i, lbn, needj)
6112	struct freeblks *freeblks;
6113	struct inode *ip;
6114	int i;
6115	ufs_lbn_t lbn;
6116	int needj;
6117{
6118	ufs2_daddr_t blkno;
6119
6120	blkno = DIP(ip, i_ib[i]);
6121	if (blkno == 0)
6122		return;
6123	DIP_SET(ip, i_ib[i], 0);
6124	newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag,
6125	    0, needj);
6126}
6127
6128static inline struct freeblks *
6129newfreeblks(mp, ip)
6130	struct mount *mp;
6131	struct inode *ip;
6132{
6133	struct freeblks *freeblks;
6134
6135	freeblks = malloc(sizeof(struct freeblks),
6136		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
6137	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
6138	LIST_INIT(&freeblks->fb_jblkdephd);
6139	LIST_INIT(&freeblks->fb_jwork);
6140	freeblks->fb_ref = 0;
6141	freeblks->fb_cgwait = 0;
6142	freeblks->fb_state = ATTACHED;
6143	freeblks->fb_uid = ip->i_uid;
6144	freeblks->fb_inum = ip->i_number;
6145	freeblks->fb_vtype = ITOV(ip)->v_type;
6146	freeblks->fb_modrev = DIP(ip, i_modrev);
6147	freeblks->fb_devvp = ip->i_devvp;
6148	freeblks->fb_chkcnt = 0;
6149	freeblks->fb_len = 0;
6150
6151	return (freeblks);
6152}
6153
6154static void
6155trunc_indirdep(indirdep, freeblks, bp, off)
6156	struct indirdep *indirdep;
6157	struct freeblks *freeblks;
6158	struct buf *bp;
6159	int off;
6160{
6161	struct allocindir *aip, *aipn;
6162
6163	/*
6164	 * The first set of allocindirs won't be in savedbp.
6165	 */
6166	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6167		if (aip->ai_offset > off)
6168			cancel_allocindir(aip, bp, freeblks, 1);
6169	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6170		if (aip->ai_offset > off)
6171			cancel_allocindir(aip, bp, freeblks, 1);
6172	/*
6173	 * These will exist in savedbp.
6174	 */
6175	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6176		if (aip->ai_offset > off)
6177			cancel_allocindir(aip, NULL, freeblks, 0);
6178	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6179		if (aip->ai_offset > off)
6180			cancel_allocindir(aip, NULL, freeblks, 0);
6181}
6182
6183/*
6184 * Follow the chain of indirects down to lastlbn creating a freework
6185 * structure for each.  This will be used to start indir_trunc() at
6186 * the right offset and create the journal records for the partial
6187 * truncation.  A second step will handle the truncated dependencies.
6188 */
6189static int
6190setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
6191	struct freeblks *freeblks;
6192	struct inode *ip;
6193	ufs_lbn_t lbn;
6194	ufs_lbn_t lastlbn;
6195	ufs2_daddr_t blkno;
6196{
6197	struct indirdep *indirdep;
6198	struct indirdep *indirn;
6199	struct freework *freework;
6200	struct newblk *newblk;
6201	struct mount *mp;
6202	struct buf *bp;
6203	uint8_t *start;
6204	uint8_t *end;
6205	ufs_lbn_t lbnadd;
6206	int level;
6207	int error;
6208	int off;
6209
6210
6211	freework = NULL;
6212	if (blkno == 0)
6213		return (0);
6214	mp = freeblks->fb_list.wk_mp;
6215	bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
6216	if ((bp->b_flags & B_CACHE) == 0) {
6217		bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
6218		bp->b_iocmd = BIO_READ;
6219		bp->b_flags &= ~B_INVAL;
6220		bp->b_ioflags &= ~BIO_ERROR;
6221		vfs_busy_pages(bp, 0);
6222		bp->b_iooffset = dbtob(bp->b_blkno);
6223		bstrategy(bp);
6224		curthread->td_ru.ru_inblock++;
6225		error = bufwait(bp);
6226		if (error) {
6227			brelse(bp);
6228			return (error);
6229		}
6230	}
6231	level = lbn_level(lbn);
6232	lbnadd = lbn_offset(ip->i_fs, level);
6233	/*
6234	 * Compute the offset of the last block we want to keep.  Store
6235	 * in the freework the first block we want to completely free.
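	 * For illustration, at a first-level indirect (level 0, where
	 * lbnadd is 1 and -(lbn + level) is the first data lbn that the
	 * indirect maps): with NDADDR == 12 and lastlbn == 100, off is
	 * (100 - 12) / 1 == 88, so pointer slots 0-88 are kept and the
	 * slots from 89 on are completely freed.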
6236	 */
6237	off = (lastlbn - -(lbn + level)) / lbnadd;
6238	if (off + 1 == NINDIR(ip->i_fs))
6239		goto nowork;
6240	freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
6241	    0);
6242	/*
6243	 * Link the freework into the indirdep.  This will prevent any new
6244	 * allocations from proceeding until we are finished with the
6245	 * truncate and the block is written.
6246	 */
6247	ACQUIRE_LOCK(ip->i_ump);
6248	indirdep = indirdep_lookup(mp, ip, bp);
6249	if (indirdep->ir_freeblks)
6250		panic("setup_trunc_indir: indirdep already truncated.");
6251	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6252	freework->fw_indir = indirdep;
6253	/*
6254	 * Cancel any allocindirs that will not make it to disk.
6255	 * We have to do this for all copies of the indirdep that
6256	 * live on this newblk.
6257	 */
6258	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6259		newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
6260		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6261			trunc_indirdep(indirn, freeblks, bp, off);
6262	} else
6263		trunc_indirdep(indirdep, freeblks, bp, off);
6264	FREE_LOCK(ip->i_ump);
6265	/*
6266	 * Creation is protected by the buf lock. The saveddata is only
6267	 * needed if a full truncation follows a partial truncation, but it
6268	 * is difficult to allocate in that case, so we fetch it anyway.
6269	 */
6270	if (indirdep->ir_saveddata == NULL)
6271		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6272		    M_SOFTDEP_FLAGS);
6273nowork:
6274	/* Fetch the blkno of the child and the zero start offset. */
6275	if (ip->i_ump->um_fstype == UFS1) {
6276		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6277		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6278	} else {
6279		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6280		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6281	}
6282	if (freework) {
6283		/* Zero the truncated pointers. */
6284		end = bp->b_data + bp->b_bcount;
6285		bzero(start, end - start);
6286		bdwrite(bp);
6287	} else
6288		bqrelse(bp);
6289	if (level == 0)
6290		return (0);
6291	lbn++; /* adjust level */
6292	lbn -= (off * lbnadd);
6293	return (setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno));
6294}
6295
6296/*
6297 * Complete the partial truncation of an indirect block setup by
6298 * setup_trunc_indir().  This zeros the truncated pointers in the saved
6299 * copy and writes them to disk before the freeblks is allowed to complete.
6300 */
6301static void
6302complete_trunc_indir(freework)
6303	struct freework *freework;
6304{
6305	struct freework *fwn;
6306	struct indirdep *indirdep;
6307	struct ufsmount *ump;
6308	struct buf *bp;
6309	uintptr_t start;
6310	int count;
6311
6312	ump = VFSTOUFS(freework->fw_list.wk_mp);
6313	LOCK_OWNED(ump);
6314	indirdep = freework->fw_indir;
6315	for (;;) {
6316		bp = indirdep->ir_bp;
6317		/* See if the block was discarded. */
6318		if (bp == NULL)
6319			break;
6320		/* Inline part of getdirtybuf().  We don't want bremfree. */
6321		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6322			break;
6323		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6324		    LOCK_PTR(ump)) == 0)
6325			BUF_UNLOCK(bp);
6326		ACQUIRE_LOCK(ump);
6327	}
6328	freework->fw_state |= DEPCOMPLETE;
6329	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6330	/*
6331	 * Zero the pointers in the saved copy.
6332	 */
6333	if (indirdep->ir_state & UFS1FMT)
6334		start = sizeof(ufs1_daddr_t);
6335	else
6336		start = sizeof(ufs2_daddr_t);
6337	start *= freework->fw_start;
6338	count = indirdep->ir_savebp->b_bcount - start;
6339	start += (uintptr_t)indirdep->ir_savebp->b_data;
6340	bzero((char *)start, count);
6341	/*
6342	 * We need to start the next truncation in the list if it has not
6343	 * been started yet.
6344	 */
6345	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6346	if (fwn != NULL) {
6347		if (fwn->fw_freeblks == indirdep->ir_freeblks)
6348			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6349		if ((fwn->fw_state & ONWORKLIST) == 0)
6350			freework_enqueue(fwn);
6351	}
6352	/*
6353	 * If bp is NULL the block was fully truncated, so restore
6354	 * the saved block list; otherwise free the saved data if it
6355	 * is no longer needed.
6356	 */
6357	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6358		if (bp == NULL)
6359			bcopy(indirdep->ir_saveddata,
6360			    indirdep->ir_savebp->b_data,
6361			    indirdep->ir_savebp->b_bcount);
6362		free(indirdep->ir_saveddata, M_INDIRDEP);
6363		indirdep->ir_saveddata = NULL;
6364	}
6365	/*
6366	 * When bp is NULL there is a full truncation pending.  We
6367	 * must wait for this full truncation to be journaled before
6368	 * we can release this freework because the disk pointers will
6369	 * never be written as zero.
6370	 */
6371	if (bp == NULL)  {
6372		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6373			handle_written_freework(freework);
6374		else
6375			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6376			   &freework->fw_list);
6377	} else {
6378		/* Complete when the real copy is written. */
6379		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6380		BUF_UNLOCK(bp);
6381	}
6382}
6383
6384/*
6385 * Calculate the number of blocks we are going to release where datablocks
6386 * is the current total and length is the new file size.
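 * For illustration, with fs_bsize == 16384 and fs_fsize == 2048, a new
 * length of 100000 bytes spans 7 full blocks and so stays within the
 * NDADDR direct blocks; the space kept is howmany(100000, 2048) == 49
 * fragments, and the result is datablocks minus those 49 fragments
 * converted to DEV_BSIZE units by fsbtodb().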
6387 */
6388static ufs2_daddr_t
6389blkcount(fs, datablocks, length)
6390	struct fs *fs;
6391	ufs2_daddr_t datablocks;
6392	off_t length;
6393{
6394	off_t totblks, numblks;
6395
6396	totblks = 0;
6397	numblks = howmany(length, fs->fs_bsize);
6398	if (numblks <= NDADDR) {
6399		totblks = howmany(length, fs->fs_fsize);
6400		goto out;
6401	}
6402	totblks = blkstofrags(fs, numblks);
6403	numblks -= NDADDR;
6404	/*
6405	 * Count all single, then double, then triple indirects required.
6406	 * Subtracting one indirect's worth of blocks for each pass
6407	 * acknowledges the one of each level pointed to by the inode.
6408	 */
6409	for (;;) {
6410		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6411		numblks -= NINDIR(fs);
6412		if (numblks <= 0)
6413			break;
6414		numblks = howmany(numblks, NINDIR(fs));
6415	}
6416out:
6417	totblks = fsbtodb(fs, totblks);
6418	/*
6419	 * Handle sparse files.  We can't reclaim more blocks than the inode
6420	 * references.  We will correct it later in handle_complete_freeblks()
6421	 * when we know the real count.
6422	 */
6423	if (totblks > datablocks)
6424		return (0);
6425	return (datablocks - totblks);
6426}
6427
6428/*
6429 * Handle freeblocks for journaled softupdate filesystems.
6430 *
6431 * Contrary to normal softupdates, we must preserve the block pointers in
6432 * indirects until their subordinates are free.  This is to avoid journaling
6433 * every block that is freed which may consume more space than the journal
6434 * itself.  The recovery program will see the free block journals at the
6435 * base of the truncated area and traverse them to reclaim space.  The
6436 * pointers in the inode may be cleared immediately after the journal
6437 * records are written because each direct and indirect pointer in the
6438 * inode is recorded in a journal.  This permits full truncation to proceed
6439 * asynchronously.  The write order is journal -> inode -> cgs -> indirects.
6440 *
6441 * The algorithm is as follows:
6442 * 1) Traverse the in-memory state and create journal entries to release
6443 *    the relevant blocks and full indirect trees.
6444 * 2) Traverse the indirect block chain adding partial truncation freework
6445 *    records to indirects in the path to lastlbn.  The freework will
6446 *    prevent new allocation dependencies from being satisfied in this
6447 *    indirect until the truncation completes.
6448 * 3) Read and lock the inode block, performing an update with the new size
6449 *    and pointers.  This prevents truncated data from becoming valid on
6450 *    disk through step 4.
6451 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6452 *    eliminate journal work for those records that do not require it.
6453 * 5) Schedule the journal records to be written followed by the inode block.
6454 * 6) Allocate any necessary frags for the end of file.
6455 * 7) Zero any partially truncated blocks.
6456 *
6457 * From this truncation proceeds asynchronously using the freework and
6458 * indir_trunc machinery.  The file will not be extended again into a
6459 * partially truncated indirect block until all work is completed but
6460 * the normal dependency mechanism ensures that it is rolled back/forward
6461 * as appropriate.  Further truncation may occur without delay and is
6462 * serialized in indir_trunc().
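 * For example, a partial truncation that ends inside a double indirect
 * needs only a jtrunc record plus journal records for the full trees
 * beyond it (step 1); the indirects on the path to lastlbn are then
 * trimmed asynchronously by the freework records created in step 2.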
6463 */
6464void
6465softdep_journal_freeblocks(ip, cred, length, flags)
6466	struct inode *ip;	/* The inode whose length is to be reduced */
6467	struct ucred *cred;
6468	off_t length;		/* The new length for the file */
6469	int flags;		/* IO_EXT and/or IO_NORMAL */
6470{
6471	struct freeblks *freeblks, *fbn;
6472	struct worklist *wk, *wkn;
6473	struct inodedep *inodedep;
6474	struct jblkdep *jblkdep;
6475	struct allocdirect *adp, *adpn;
6476	struct ufsmount *ump;
6477	struct fs *fs;
6478	struct buf *bp;
6479	struct vnode *vp;
6480	struct mount *mp;
6481	ufs2_daddr_t extblocks, datablocks;
6482	ufs_lbn_t tmpval, lbn, lastlbn;
6483	int frags, lastoff, iboff, allocblock, needj, error, i;
6484
6485	fs = ip->i_fs;
6486	ump = ip->i_ump;
6487	mp = UFSTOVFS(ump);
6488	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6489	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
6490	vp = ITOV(ip);
6491	needj = 1;
6492	iboff = -1;
6493	allocblock = 0;
6494	extblocks = 0;
6495	datablocks = 0;
6496	frags = 0;
6497	freeblks = newfreeblks(mp, ip);
6498	ACQUIRE_LOCK(ump);
6499	/*
6500	 * If we're truncating a removed file that will never be written
6501	 * we don't need to journal the block frees.  The canceled journals
6502	 * for the allocations will suffice.
6503	 */
6504	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6505	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6506	    length == 0)
6507		needj = 0;
6508	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6509	    ip->i_number, length, needj);
6510	FREE_LOCK(ump);
6511	/*
6512	 * Calculate the lbn that we are truncating to.  This results in -1
6513	 * if we're truncating to 0 bytes.  So it is the last lbn we want
6514	 * to keep, not the first lbn we want to truncate.
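	 * For illustration: length == 0 gives lastlbn == -1 (keep no
	 * blocks), while any length from 1 byte up to one full block
	 * gives lastlbn == 0, keeping only lbn 0.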
6515	 */
6516	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6517	lastoff = blkoff(fs, length);
6518	/*
6519	 * Compute frags we are keeping in lastlbn.  0 means all.
6520	 */
6521	if (lastlbn >= 0 && lastlbn < NDADDR) {
6522		frags = fragroundup(fs, lastoff);
6523		/* adp offset of last valid allocdirect. */
6524		iboff = lastlbn;
6525	} else if (lastlbn > 0)
6526		iboff = NDADDR;
6527	if (fs->fs_magic == FS_UFS2_MAGIC)
6528		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6529	/*
6530	 * Handle normal data blocks and indirects.  This section saves
6531	 * values used after the inode update to complete frag and indirect
6532	 * truncation.
6533	 */
6534	if ((flags & IO_NORMAL) != 0) {
6535		/*
6536		 * Handle truncation of whole direct and indirect blocks.
6537		 */
6538		for (i = iboff + 1; i < NDADDR; i++)
6539			setup_freedirect(freeblks, ip, i, needj);
6540		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6541		    i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6542			/* Release a whole indirect tree. */
6543			if (lbn > lastlbn) {
6544				setup_freeindir(freeblks, ip, i, -lbn - i,
6545				    needj);
6546				continue;
6547			}
6548			iboff = i + NDADDR;
6549			/*
6550			 * Traverse partially truncated indirect tree.
6551			 */
6552			if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6553				setup_trunc_indir(freeblks, ip, -lbn - i,
6554				    lastlbn, DIP(ip, i_ib[i]));
6555		}
6556		/*
6557		 * Handle partial truncation to a frag boundary.
6558		 */
6559		if (frags) {
6560			ufs2_daddr_t blkno;
6561			long oldfrags;
6562
6563			oldfrags = blksize(fs, ip, lastlbn);
6564			blkno = DIP(ip, i_db[lastlbn]);
6565			if (blkno && oldfrags != frags) {
6566				oldfrags -= frags;
6567				oldfrags = numfrags(ip->i_fs, oldfrags);
6568				blkno += numfrags(ip->i_fs, frags);
6569				newfreework(ump, freeblks, NULL, lastlbn,
6570				    blkno, oldfrags, 0, needj);
6571				if (needj)
6572					adjust_newfreework(freeblks,
6573					    numfrags(ip->i_fs, frags));
6574			} else if (blkno == 0)
6575				allocblock = 1;
6576		}
6577		/*
6578		 * Add a journal record for partial truncate if we are
6579		 * handling indirect blocks.  Non-indirects need no extra
6580		 * journaling.
6581		 */
6582		if (length != 0 && lastlbn >= NDADDR) {
6583			ip->i_flag |= IN_TRUNCATED;
6584			newjtrunc(freeblks, length, 0);
6585		}
6586		ip->i_size = length;
6587		DIP_SET(ip, i_size, ip->i_size);
6588		datablocks = DIP(ip, i_blocks) - extblocks;
6589		if (length != 0)
6590			datablocks = blkcount(ip->i_fs, datablocks, length);
6591		freeblks->fb_len = length;
6592	}
6593	if ((flags & IO_EXT) != 0) {
6594		for (i = 0; i < NXADDR; i++)
6595			setup_freeext(freeblks, ip, i, needj);
6596		ip->i_din2->di_extsize = 0;
6597		datablocks += extblocks;
6598	}
6599#ifdef QUOTA
6600	/* Reference the quotas in case the block count is wrong in the end. */
6601	quotaref(vp, freeblks->fb_quota);
6602	(void) chkdq(ip, -datablocks, NOCRED, 0);
6603#endif
6604	freeblks->fb_chkcnt = -datablocks;
6605	UFS_LOCK(ump);
6606	fs->fs_pendingblocks += datablocks;
6607	UFS_UNLOCK(ump);
6608	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6609	/*
6610	 * Handle truncation of incomplete alloc direct dependencies.  We
6611	 * hold the inode block locked to prevent incomplete dependencies
6612	 * from reaching the disk while we are eliminating those that
6613	 * have been truncated.  This is a partially inlined ffs_update().
6614	 */
6615	ufs_itimes(vp);
6616	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6617	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6618	    (int)fs->fs_bsize, cred, &bp);
6619	if (error) {
6620		brelse(bp);
6621		softdep_error("softdep_journal_freeblocks", error);
6622		return;
6623	}
6624	if (bp->b_bufsize == fs->fs_bsize)
6625		bp->b_flags |= B_CLUSTEROK;
6626	softdep_update_inodeblock(ip, bp, 0);
6627	if (ump->um_fstype == UFS1)
6628		*((struct ufs1_dinode *)bp->b_data +
6629		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6630	else
6631		*((struct ufs2_dinode *)bp->b_data +
6632		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6633	ACQUIRE_LOCK(ump);
6634	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6635	if ((inodedep->id_state & IOSTARTED) != 0)
6636		panic("softdep_journal_freeblocks: inode busy");
6637	/*
6638	 * Add the freeblks structure to the list of operations that
6639	 * must await the zero'ed inode being written to disk. If we
6640	 * still have a bitmap dependency (needj), then the inode
6641	 * has never been written to disk, so we can process the
6642	 * freeblks below once we have deleted the dependencies.
6643	 */
6644	if (needj)
6645		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6646	else
6647		freeblks->fb_state |= COMPLETE;
6648	if ((flags & IO_NORMAL) != 0) {
6649		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6650			if (adp->ad_offset > iboff)
6651				cancel_allocdirect(&inodedep->id_inoupdt, adp,
6652				    freeblks);
6653			/*
6654			 * Truncate the allocdirect.  We could eliminate
6655			 * or modify journal records as well.
6656			 */
6657			else if (adp->ad_offset == iboff && frags)
6658				adp->ad_newsize = frags;
6659		}
6660	}
6661	if ((flags & IO_EXT) != 0)
6662		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6663			cancel_allocdirect(&inodedep->id_extupdt, adp,
6664			    freeblks);
6665	/*
6666	 * Scan the bufwait list for newblock dependencies that will never
6667	 * make it to disk.
6668	 */
6669	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6670		if (wk->wk_type != D_ALLOCDIRECT)
6671			continue;
6672		adp = WK_ALLOCDIRECT(wk);
6673		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6674		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6675			cancel_jfreeblk(freeblks, adp->ad_newblkno);
6676			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6677			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6678		}
6679	}
6680	/*
6681	 * Add journal work.
6682	 */
6683	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6684		add_to_journal(&jblkdep->jb_list);
6685	FREE_LOCK(ump);
6686	bdwrite(bp);
6687	/*
6688	 * Truncate dependency structures beyond length.
6689	 */
6690	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6691	/*
6692	 * This is only set when we need to allocate a fragment because
6693	 * none existed at the end of a frag-sized file.  It handles only
6694	 * allocating a new, zero filled block.
6695	 */
6696	if (allocblock) {
6697		ip->i_size = length - lastoff;
6698		DIP_SET(ip, i_size, ip->i_size);
6699		error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6700		if (error != 0) {
6701			softdep_error("softdep_journal_freeblocks", error);
6702			return;
6703		}
6704		ip->i_size = length;
6705		DIP_SET(ip, i_size, length);
6706		ip->i_flag |= IN_CHANGE | IN_UPDATE;
6707		allocbuf(bp, frags);
6708		ffs_update(vp, 0);
6709		bawrite(bp);
6710	} else if (lastoff != 0 && vp->v_type != VDIR) {
6711		int size;
6712
6713		/*
6714		 * Zero the end of a truncated frag or block.
6715		 */
6716		size = sblksize(fs, length, lastlbn);
6717		error = bread(vp, lastlbn, size, cred, &bp);
6718		if (error) {
6719			softdep_error("softdep_journal_freeblocks", error);
6720			return;
6721		}
6722		bzero((char *)bp->b_data + lastoff, size - lastoff);
6723		bawrite(bp);
6724
6725	}
6726	ACQUIRE_LOCK(ump);
6727	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6728	TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
6729	freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
6730	/*
6731	 * We zero earlier truncations so they don't erroneously
6732	 * update i_blocks.
6733	 */
6734	if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
6735		TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
6736			fbn->fb_len = 0;
6737	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
6738	    LIST_EMPTY(&freeblks->fb_jblkdephd))
6739		freeblks->fb_state |= INPROGRESS;
6740	else
6741		freeblks = NULL;
6742	FREE_LOCK(ump);
6743	if (freeblks)
6744		handle_workitem_freeblocks(freeblks, 0);
6745	trunc_pages(ip, length, extblocks, flags);
6746
6747}
6748
6749/*
6750 * Flush a JOP_SYNC to the journal.
6751 */
6752void
6753softdep_journal_fsync(ip)
6754	struct inode *ip;
6755{
6756	struct jfsync *jfsync;
6757
6758	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
6759	    ("softdep_journal_fsync called on non-softdep filesystem"));
6760	if ((ip->i_flag & IN_TRUNCATED) == 0)
6761		return;
6762	ip->i_flag &= ~IN_TRUNCATED;
6763	jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
6764	workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump));
6765	jfsync->jfs_size = ip->i_size;
6766	jfsync->jfs_ino = ip->i_number;
6767	ACQUIRE_LOCK(ip->i_ump);
6768	add_to_journal(&jfsync->jfs_list);
6769	jwait(&jfsync->jfs_list, MNT_WAIT);
6770	FREE_LOCK(ip->i_ump);
6771}
6772
6773/*
6774 * Block de-allocation dependencies.
6775 *
6776 * When blocks are de-allocated, the on-disk pointers must be nullified before
6777 * the blocks are made available for use by other files.  (The true
6778 * requirement is that old pointers must be nullified before new on-disk
6779 * pointers are set.  We chose this slightly more stringent requirement to
6780 * reduce complexity.) Our implementation handles this dependency by updating
6781 * the inode (or indirect block) appropriately but delaying the actual block
6782 * de-allocation (i.e., freemap and free space count manipulation) until
6783 * after the updated versions reach stable storage.  After the disk is
6784 * updated, the blocks can be safely de-allocated whenever it is convenient.
6785 * This implementation handles only the common case of reducing a file's
6786 * length to zero. Other cases are handled by the conventional synchronous
6787 * write approach.
6788 *
6789 * The ffs implementation with which we worked double-checks
6790 * the state of the block pointers and file size as it reduces
6791 * a file's length.  Some of this code is replicated here in our
6792 * soft updates implementation.  The freeblks->fb_chkcnt field is
6793 * used to transfer a part of this information to the procedure
6794 * that eventually de-allocates the blocks.
6795 *
6796 * This routine should be called from the routine that shortens
6797 * a file's length, before the inode's size or block pointers
6798 * are modified. It will save the block pointer information for
6799 * later release and zero the inode so that the calling routine
6800 * can release it.
6801 */
6802void
6803softdep_setup_freeblocks(ip, length, flags)
6804	struct inode *ip;	/* The inode whose length is to be reduced */
6805	off_t length;		/* The new length for the file */
6806	int flags;		/* IO_EXT and/or IO_NORMAL */
6807{
6808	struct ufs1_dinode *dp1;
6809	struct ufs2_dinode *dp2;
6810	struct freeblks *freeblks;
6811	struct inodedep *inodedep;
6812	struct allocdirect *adp;
6813	struct ufsmount *ump;
6814	struct buf *bp;
6815	struct fs *fs;
6816	ufs2_daddr_t extblocks, datablocks;
6817	struct mount *mp;
6818	int i, delay, error;
6819	ufs_lbn_t tmpval;
6820	ufs_lbn_t lbn;
6821
6822	ump = ip->i_ump;
6823	mp = UFSTOVFS(ump);
6824	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6825	    ("softdep_setup_freeblocks called on non-softdep filesystem"));
6826	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
6827	    ip->i_number, length);
6828	KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
6829	fs = ip->i_fs;
6830	freeblks = newfreeblks(mp, ip);
6831	extblocks = 0;
6832	datablocks = 0;
6833	if (fs->fs_magic == FS_UFS2_MAGIC)
6834		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6835	if ((flags & IO_NORMAL) != 0) {
6836		for (i = 0; i < NDADDR; i++)
6837			setup_freedirect(freeblks, ip, i, 0);
6838		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6839		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
6840			setup_freeindir(freeblks, ip, i, -lbn - i, 0);
6841		ip->i_size = 0;
6842		DIP_SET(ip, i_size, 0);
6843		datablocks = DIP(ip, i_blocks) - extblocks;
6844	}
6845	if ((flags & IO_EXT) != 0) {
6846		for (i = 0; i < NXADDR; i++)
6847			setup_freeext(freeblks, ip, i, 0);
6848		ip->i_din2->di_extsize = 0;
6849		datablocks += extblocks;
6850	}
6851#ifdef QUOTA
6852	/* Reference the quotas in case the block count is wrong in the end. */
6853	quotaref(ITOV(ip), freeblks->fb_quota);
6854	(void) chkdq(ip, -datablocks, NOCRED, 0);
6855#endif
6856	freeblks->fb_chkcnt = -datablocks;
6857	UFS_LOCK(ump);
6858	fs->fs_pendingblocks += datablocks;
6859	UFS_UNLOCK(ump);
6860	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6861	/*
6862	 * Push the zero'ed inode to its disk buffer so that we are free
6863	 * to delete its dependencies below. Once the dependencies are gone
6864	 * the buffer can be safely released.
6865	 */
6866	if ((error = bread(ip->i_devvp,
6867	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6868	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
6869		brelse(bp);
6870		softdep_error("softdep_setup_freeblocks", error);
6871	}
6872	if (ump->um_fstype == UFS1) {
6873		dp1 = ((struct ufs1_dinode *)bp->b_data +
6874		    ino_to_fsbo(fs, ip->i_number));
6875		ip->i_din1->di_freelink = dp1->di_freelink;
6876		*dp1 = *ip->i_din1;
6877	} else {
6878		dp2 = ((struct ufs2_dinode *)bp->b_data +
6879		    ino_to_fsbo(fs, ip->i_number));
6880		ip->i_din2->di_freelink = dp2->di_freelink;
6881		*dp2 = *ip->i_din2;
6882	}
6883	/*
6884	 * Find and eliminate any inode dependencies.
6885	 */
6886	ACQUIRE_LOCK(ump);
6887	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6888	if ((inodedep->id_state & IOSTARTED) != 0)
6889		panic("softdep_setup_freeblocks: inode busy");
6890	/*
6891	 * Add the freeblks structure to the list of operations that
6892	 * must await the zero'ed inode being written to disk. If we
6893	 * still have a bitmap dependency (delay == 0), then the inode
6894	 * has never been written to disk, so we can process the
6895	 * freeblks below once we have deleted the dependencies.
6896	 */
6897	delay = (inodedep->id_state & DEPCOMPLETE);
6898	if (delay)
6899		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6900	else
6901		freeblks->fb_state |= COMPLETE;
6902	/*
6903	 * Because the file length has been truncated to zero, any
6904	 * pending block allocation dependency structures associated
6905	 * with this inode are obsolete and can simply be de-allocated.
6906	 * We must first merge the two dependency lists to get rid of
6907	 * any duplicate freefrag structures, then purge the merged list.
6908	 * If we still have a bitmap dependency, then the inode has never
6909	 * been written to disk, so we can free any fragments without delay.
6910	 */
6911	if (flags & IO_NORMAL) {
6912		merge_inode_lists(&inodedep->id_newinoupdt,
6913		    &inodedep->id_inoupdt);
6914		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
6915			cancel_allocdirect(&inodedep->id_inoupdt, adp,
6916			    freeblks);
6917	}
6918	if (flags & IO_EXT) {
6919		merge_inode_lists(&inodedep->id_newextupdt,
6920		    &inodedep->id_extupdt);
6921		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6922			cancel_allocdirect(&inodedep->id_extupdt, adp,
6923			    freeblks);
6924	}
6925	FREE_LOCK(ump);
6926	bdwrite(bp);
6927	trunc_dependencies(ip, freeblks, -1, 0, flags);
6928	ACQUIRE_LOCK(ump);
6929	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
6930		(void) free_inodedep(inodedep);
6931	freeblks->fb_state |= DEPCOMPLETE;
6932	/*
6933	 * If the inode with zeroed block pointers is now on disk
6934	 * we can start freeing blocks.
6935	 */
6936	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
6937		freeblks->fb_state |= INPROGRESS;
6938	else
6939		freeblks = NULL;
6940	FREE_LOCK(ump);
6941	if (freeblks)
6942		handle_workitem_freeblocks(freeblks, 0);
6943	trunc_pages(ip, length, extblocks, flags);
6944}
6945
6946/*
6947 * Eliminate pages from the page cache that back parts of this inode and
6948 * adjust the vnode pager's idea of our size.  This prevents stale data
6949 * from hanging around in the page cache.
6950 */
6951static void
6952trunc_pages(ip, length, extblocks, flags)
6953	struct inode *ip;
6954	off_t length;
6955	ufs2_daddr_t extblocks;
6956	int flags;
6957{
6958	struct vnode *vp;
6959	struct fs *fs;
6960	ufs_lbn_t lbn;
6961	off_t end, extend;
6962
6963	vp = ITOV(ip);
6964	fs = ip->i_fs;
6965	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
6966	if ((flags & IO_EXT) != 0)
6967		vn_pages_remove(vp, extend, 0);
6968	if ((flags & IO_NORMAL) == 0)
6969		return;
6970	BO_LOCK(&vp->v_bufobj);
6971	drain_output(vp);
6972	BO_UNLOCK(&vp->v_bufobj);
6973	/*
6974	 * The vnode pager eliminates file pages; we eliminate indirects
6975	 * below.
6976	 */
6977	vnode_pager_setsize(vp, length);
6978	/*
6979	 * Calculate the end based on the last indirect we want to keep.  If
6980	 * the block extends into indirects we can just use the negative of
6981	 * its lbn.  Doubles and triples exist at lower numbers so we must
6982	 * be careful not to remove those if they exist.  Double and triple
6983	 * indirect lbns do not overlap with others, so it is not important
6984	 * to verify how many levels are required.
6985	 */
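	/*
	 * As a worked example (assuming NIADDR == 3): if truncation leaves
	 * data through lbn 5000, the bound computed below is -5000 - 2 ==
	 * -5002.  Any indirect cached at an lbn more negative than that can
	 * only map data beyond the new length, so its pages may be removed.
	 */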
6986	lbn = lblkno(fs, length);
6987	if (lbn >= NDADDR) {
6988		/* Calculate the virtual lbn of the triple indirect. */
6989		lbn = -lbn - (NIADDR - 1);
6990		end = OFF_TO_IDX(lblktosize(fs, lbn));
6991	} else
6992		end = extend;
6993	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
6994}
6995
6996/*
6997 * See if the buf bp is in the range eliminated by truncation.
6998 */
6999static int
7000trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
7001	struct buf *bp;
7002	int *blkoffp;
7003	ufs_lbn_t lastlbn;
7004	int lastoff;
7005	int flags;
7006{
7007	ufs_lbn_t lbn;
7008
7009	*blkoffp = 0;
7010	/* Only match ext/normal blocks as appropriate. */
7011	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
7012	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
7013		return (0);
7014	/* ALTDATA is always a full truncation. */
7015	if ((bp->b_xflags & BX_ALTDATA) != 0)
7016		return (1);
7017	/* -1 is full truncation. */
7018	if (lastlbn == -1)
7019		return (1);
7020	/*
7021	 * If this is a partial truncate we only want those
7022	 * blocks and indirect blocks that cover the range
7023	 * we're after.
7024	 */
7025	lbn = bp->b_lblkno;
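	/*
	 * An indirect's negative lblkno encodes -(first data lbn it maps) -
	 * level, so the conversion below recovers the first data lbn; e.g.
	 * a double indirect cached at lbn -2061 (assuming NINDIR(fs) ==
	 * 2048) covers data starting at lbn 2060.
	 */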
7026	if (lbn < 0)
7027		lbn = -(lbn + lbn_level(lbn));
7028	if (lbn < lastlbn)
7029		return (0);
7030	/* Here we only truncate lblkno if it's partial. */
7031	if (lbn == lastlbn) {
7032		if (lastoff == 0)
7033			return (0);
7034		*blkoffp = lastoff;
7035	}
7036	return (1);
7037}
7038
7039/*
7040 * Eliminate any dependencies that exist in memory beyond lblkno:off
7041 */
7042static void
7043trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
7044	struct inode *ip;
7045	struct freeblks *freeblks;
7046	ufs_lbn_t lastlbn;
7047	int lastoff;
7048	int flags;
7049{
7050	struct bufobj *bo;
7051	struct vnode *vp;
7052	struct buf *bp;
7053	int blkoff;
7054
7055	/*
7056	 * We must wait for any I/O in progress to finish so that
7057	 * all potential buffers on the dirty list will be visible.
7058	 * Once they are all there, walk the list and get rid of
7059	 * any dependencies.
7060	 */
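	/*
	 * BV_SCANNED marks buffers that have already been examined, so the
	 * scans below can safely restart from the head of the list whenever
	 * the bufobj lock is dropped while still making forward progress.
	 */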
7061	vp = ITOV(ip);
7062	bo = &vp->v_bufobj;
7063	BO_LOCK(bo);
7064	drain_output(vp);
7065	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
7066		bp->b_vflags &= ~BV_SCANNED;
7067restart:
7068	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
7069		if (bp->b_vflags & BV_SCANNED)
7070			continue;
7071		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7072			bp->b_vflags |= BV_SCANNED;
7073			continue;
7074		}
7075		KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer"));
7076		if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
7077			goto restart;
7078		BO_UNLOCK(bo);
7079		if (deallocate_dependencies(bp, freeblks, blkoff))
7080			bqrelse(bp);
7081		else
7082			brelse(bp);
7083		BO_LOCK(bo);
7084		goto restart;
7085	}
7086	/*
7087	 * Now do the work of vtruncbuf while also matching indirect blocks.
7088	 */
7089	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
7090		bp->b_vflags &= ~BV_SCANNED;
7091cleanrestart:
7092	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
7093		if (bp->b_vflags & BV_SCANNED)
7094			continue;
7095		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7096			bp->b_vflags |= BV_SCANNED;
7097			continue;
7098		}
7099		if (BUF_LOCK(bp,
7100		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
7101		    BO_LOCKPTR(bo)) == ENOLCK) {
7102			BO_LOCK(bo);
7103			goto cleanrestart;
7104		}
7105		bp->b_vflags |= BV_SCANNED;
7106		bremfree(bp);
7107		if (blkoff != 0) {
7108			allocbuf(bp, blkoff);
7109			bqrelse(bp);
7110		} else {
7111			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
7112			brelse(bp);
7113		}
7114		BO_LOCK(bo);
7115		goto cleanrestart;
7116	}
7117	drain_output(vp);
7118	BO_UNLOCK(bo);
7119}
7120
7121static int
7122cancel_pagedep(pagedep, freeblks, blkoff)
7123	struct pagedep *pagedep;
7124	struct freeblks *freeblks;
7125	int blkoff;
7126{
7127	struct jremref *jremref;
7128	struct jmvref *jmvref;
7129	struct dirrem *dirrem, *tmp;
7130	int i;
7131
7132	/*
7133	 * Copy any directory remove dependencies to the list
7134	 * to be processed after the freeblks proceeds.  If
7135	 * the directory entries never made it to disk they
7136	 * can be dumped directly onto the work list.
7137	 */
7138	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
7139		/* Skip this directory removal if it is intended to remain. */
7140		if (dirrem->dm_offset < blkoff)
7141			continue;
7142		/*
7143		 * If there are any dirrems we wait for the journal write
7144		 * to complete and then restart the buf scan as the lock
7145		 * has been dropped.
7146		 */
7147		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
7148			jwait(&jremref->jr_list, MNT_WAIT);
7149			return (ERESTART);
7150		}
7151		LIST_REMOVE(dirrem, dm_next);
7152		dirrem->dm_dirinum = pagedep->pd_ino;
7153		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
7154	}
7155	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
7156		jwait(&jmvref->jm_list, MNT_WAIT);
7157		return (ERESTART);
7158	}
7159	/*
7160	 * When we're partially truncating a pagedep we just want to flush
7161	 * journal entries and return.  There cannot be any adds in the
7162	 * truncated portion of the directory, and the newblk must remain if
7163	 * part of the block remains.
7164	 */
7165	if (blkoff != 0) {
7166		struct diradd *dap;
7167
7168		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7169			if (dap->da_offset > blkoff)
7170				panic("cancel_pagedep: diradd %p off %d > %d",
7171				    dap, dap->da_offset, blkoff);
7172		for (i = 0; i < DAHASHSZ; i++)
7173			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7174				if (dap->da_offset > blkoff)
7175					panic("cancel_pagedep: diradd %p off %d > %d",
7176					    dap, dap->da_offset, blkoff);
7177		return (0);
7178	}
7179	/*
7180	 * There should be no directory add dependencies present
7181	 * as the directory could not be truncated until all
7182	 * children were removed.
7183	 */
7184	KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
7185	    ("deallocate_dependencies: pendinghd != NULL"));
7186	for (i = 0; i < DAHASHSZ; i++)
7187		KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7188		    ("deallocate_dependencies: diraddhd != NULL"));
7189	if ((pagedep->pd_state & NEWBLOCK) != 0)
7190		free_newdirblk(pagedep->pd_newdirblk);
7191	if (free_pagedep(pagedep) == 0)
7192		panic("Failed to free pagedep %p", pagedep);
7193	return (0);
7194}
7195
7196/*
7197 * Reclaim any dependency structures from a buffer that is about to
7198 * be reallocated to a new vnode. The buffer must be locked; thus,
7199 * no I/O completion operations can occur while we are manipulating
7200 * its associated dependencies. The mutex is held so that other I/Os
7201 * associated with related dependencies do not occur.
7202 */
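/*
 * Returns 0 when every dependency was cancelled and the buffer may be
 * invalidated, EBUSY when a partial truncation leaves live dependencies
 * behind and the buffer must be kept, and ERESTART when a lock was
 * dropped to wait on the journal and the caller must rescan.
 */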
7203static int
7204deallocate_dependencies(bp, freeblks, off)
7205	struct buf *bp;
7206	struct freeblks *freeblks;
7207	int off;
7208{
7209	struct indirdep *indirdep;
7210	struct pagedep *pagedep;
7211	struct worklist *wk, *wkn;
7212	struct ufsmount *ump;
7213
7214	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
7215		goto done;
7216	ump = VFSTOUFS(wk->wk_mp);
7217	ACQUIRE_LOCK(ump);
7218	LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7219		switch (wk->wk_type) {
7220		case D_INDIRDEP:
7221			indirdep = WK_INDIRDEP(wk);
7222			if (bp->b_lblkno >= 0 ||
7223			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7224				panic("deallocate_dependencies: not indir");
7225			cancel_indirdep(indirdep, bp, freeblks);
7226			continue;
7227
7228		case D_PAGEDEP:
7229			pagedep = WK_PAGEDEP(wk);
7230			if (cancel_pagedep(pagedep, freeblks, off)) {
7231				FREE_LOCK(ump);
7232				return (ERESTART);
7233			}
7234			continue;
7235
7236		case D_ALLOCINDIR:
7237			/*
7238			 * Simply remove the allocindir; we'll find it via
7239			 * the indirdep where we can clear pointers if
7240			 * needed.
7241			 */
7242			WORKLIST_REMOVE(wk);
7243			continue;
7244
7245		case D_FREEWORK:
7246			/*
7247			 * A truncation is waiting for the zero'd pointers
7248			 * to be written.  It can be freed when the freeblks
7249			 * is journaled.
7250			 */
7251			WORKLIST_REMOVE(wk);
7252			wk->wk_state |= ONDEPLIST;
7253			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7254			break;
7255
7256		case D_ALLOCDIRECT:
7257			if (off != 0)
7258				continue;
7259			/* FALLTHROUGH */
7260		default:
7261			panic("deallocate_dependencies: Unexpected type %s",
7262			    TYPENAME(wk->wk_type));
7263			/* NOTREACHED */
7264		}
7265	}
7266	FREE_LOCK(ump);
7267done:
7268	/*
7269	 * Don't throw away this buf; we were partially truncating and
7270	 * some deps may legitimately remain.
7271	 */
7272	if (off) {
7273		allocbuf(bp, off);
7274		bp->b_vflags |= BV_SCANNED;
7275		return (EBUSY);
7276	}
7277	bp->b_flags |= B_INVAL | B_NOCACHE;
7278
7279	return (0);
7280}
7281
7282/*
7283 * An allocdirect is being canceled due to a truncate.  We must make sure
7284 * the journal entry is released in concert with the blkfree that releases
7285 * the storage.  Completed journal entries must not be released until the
7286 * space is no longer pointed to by the inode or in the bitmap.
7287 */
7288static void
7289cancel_allocdirect(adphead, adp, freeblks)
7290	struct allocdirectlst *adphead;
7291	struct allocdirect *adp;
7292	struct freeblks *freeblks;
7293{
7294	struct freework *freework;
7295	struct newblk *newblk;
7296	struct worklist *wk;
7297
7298	TAILQ_REMOVE(adphead, adp, ad_next);
7299	newblk = (struct newblk *)adp;
7300	freework = NULL;
7301	/*
7302	 * Find the correct freework structure.
7303	 */
7304	LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7305		if (wk->wk_type != D_FREEWORK)
7306			continue;
7307		freework = WK_FREEWORK(wk);
7308		if (freework->fw_blkno == newblk->nb_newblkno)
7309			break;
7310	}
7311	if (freework == NULL)
7312		panic("cancel_allocdirect: Freework not found");
7313	/*
7314	 * If a newblk exists at all we still have the journal entry that
7315	 * initiated the allocation so we do not need to journal the free.
7316	 */
7317	cancel_jfreeblk(freeblks, freework->fw_blkno);
7318	/*
7319	 * If the journal hasn't been written the jnewblk must be passed
7320	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
7321	 * this by linking the journal dependency into the freework to be
7322	 * freed when freework_freeblock() is called.  If the journal has
7323	 * been written we can simply reclaim the journal space when the
7324	 * freeblks work is complete.
7325	 */
7326	freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7327	    &freeblks->fb_jwork);
7328	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7329}
7330
7331
7332/*
7333 * Cancel a new block allocation.  May be an indirect or direct block.  We
7334 * remove it from various lists and return any journal record that needs to
7335 * be resolved by the caller.
7336 *
7337 * A special consideration is made for indirects which were never pointed
7338 * at on disk and will never be found once this block is released.
7339 */
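/*
 * Callers tie the returned jnewblk (if any) to the work item that will
 * free the block; cancel_allocdirect(), for example, stores it in the
 * matching freework so the journal record is written or discarded
 * together with the free itself.
 */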
7340static struct jnewblk *
7341cancel_newblk(newblk, wk, wkhd)
7342	struct newblk *newblk;
7343	struct worklist *wk;
7344	struct workhead *wkhd;
7345{
7346	struct jnewblk *jnewblk;
7347
7348	CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7349
7350	newblk->nb_state |= GOINGAWAY;
7351	/*
7352	 * Previously we traversed the completedhd on each indirdep
7353	 * attached to this newblk to cancel them and gather journal
7354	 * work.  Since we need only the oldest journal segment and
7355	 * the lowest point on the tree will always have the oldest
7356	 * journal segment we are free to release the segments
7357	 * of any subordinates and may leave the indirdep list to
7358	 * indirdep_complete() when this newblk is freed.
7359	 */
7360	if (newblk->nb_state & ONDEPLIST) {
7361		newblk->nb_state &= ~ONDEPLIST;
7362		LIST_REMOVE(newblk, nb_deps);
7363	}
7364	if (newblk->nb_state & ONWORKLIST)
7365		WORKLIST_REMOVE(&newblk->nb_list);
7366	/*
7367	 * If the journal entry hasn't been written we save a pointer to
7368	 * the dependency that frees it until it is written or the
7369	 * superseding operation completes.
7370	 */
7371	jnewblk = newblk->nb_jnewblk;
7372	if (jnewblk != NULL && wk != NULL) {
7373		newblk->nb_jnewblk = NULL;
7374		jnewblk->jn_dep = wk;
7375	}
7376	if (!LIST_EMPTY(&newblk->nb_jwork))
7377		jwork_move(wkhd, &newblk->nb_jwork);
7378	/*
7379	 * When truncating we must free the newdirblk early to remove
7380	 * the pagedep from the hash before returning.
7381	 */
7382	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7383		free_newdirblk(WK_NEWDIRBLK(wk));
7384	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7385		panic("cancel_newblk: extra newdirblk");
7386
7387	return (jnewblk);
7388}
7389
7390/*
7391 * Schedule the freefrag associated with a newblk to be released once
7392 * the pointers are written and the previous block is no longer needed.
7393 */
7394static void
7395newblk_freefrag(newblk)
7396	struct newblk *newblk;
7397{
7398	struct freefrag *freefrag;
7399
7400	if (newblk->nb_freefrag == NULL)
7401		return;
7402	freefrag = newblk->nb_freefrag;
7403	newblk->nb_freefrag = NULL;
7404	freefrag->ff_state |= COMPLETE;
7405	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7406		add_to_worklist(&freefrag->ff_list, 0);
7407}
7408
7409/*
7410 * Free a newblk. Generate a new freefrag work request if appropriate.
7411 * This must be called after the inode pointer and any direct block pointers
7412 * are valid or fully removed via truncate or frag extension.
7413 */
7414static void
7415free_newblk(newblk)
7416	struct newblk *newblk;
7417{
7418	struct indirdep *indirdep;
7419	struct worklist *wk;
7420
7421	KASSERT(newblk->nb_jnewblk == NULL,
7422	    ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7423	KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7424	    ("free_newblk: unclaimed newblk"));
7425	LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp));
7426	newblk_freefrag(newblk);
7427	if (newblk->nb_state & ONDEPLIST)
7428		LIST_REMOVE(newblk, nb_deps);
7429	if (newblk->nb_state & ONWORKLIST)
7430		WORKLIST_REMOVE(&newblk->nb_list);
7431	LIST_REMOVE(newblk, nb_hash);
7432	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7433		free_newdirblk(WK_NEWDIRBLK(wk));
7434	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7435		panic("free_newblk: extra newdirblk");
7436	while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7437		indirdep_complete(indirdep);
7438	handle_jwork(&newblk->nb_jwork);
7439	WORKITEM_FREE(newblk, D_NEWBLK);
7440}
7441
7442/*
7443 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7444 * This routine must be called with splbio interrupts blocked.
7445 */
7446static void
7447free_newdirblk(newdirblk)
7448	struct newdirblk *newdirblk;
7449{
7450	struct pagedep *pagedep;
7451	struct diradd *dap;
7452	struct worklist *wk;
7453
7454	LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp));
7455	WORKLIST_REMOVE(&newdirblk->db_list);
7456	/*
7457	 * If the pagedep is still linked onto the directory buffer
7458	 * dependency chain, then some of the entries on the
7459	 * pd_pendinghd list may not be committed to disk yet. In
7460	 * this case, we will simply clear the NEWBLOCK flag and
7461	 * let the pd_pendinghd list be processed when the pagedep
7462	 * is next written. If the pagedep is no longer on the buffer
7463	 * dependency chain, then all the entries on the pd_pending
7464	 * list are committed to disk and we can free them here.
7465	 */
7466	pagedep = newdirblk->db_pagedep;
7467	pagedep->pd_state &= ~NEWBLOCK;
7468	if ((pagedep->pd_state & ONWORKLIST) == 0) {
7469		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7470			free_diradd(dap, NULL);
7471		/*
7472		 * If no dependencies remain, the pagedep will be freed.
7473		 */
7474		free_pagedep(pagedep);
7475	}
7476	/* Should only ever be one item in the list. */
7477	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7478		WORKLIST_REMOVE(wk);
7479		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7480	}
7481	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7482}
7483
7484/*
7485 * Prepare an inode to be freed. The actual free operation is not
7486 * done until the zero'ed inode has been written to disk.
7487 */
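/*
 * Two outcomes are possible below: if no inodedep exists, or if
 * check_inode_unwritten() can retire it, the file is freed at once via
 * handle_workitem_freefile(); otherwise the freefile work item is
 * queued on id_inowait to run once the zero'ed inode reaches disk.
 */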
7488void
7489softdep_freefile(pvp, ino, mode)
7490	struct vnode *pvp;
7491	ino_t ino;
7492	int mode;
7493{
7494	struct inode *ip = VTOI(pvp);
7495	struct inodedep *inodedep;
7496	struct freefile *freefile;
7497	struct freeblks *freeblks;
7498	struct ufsmount *ump;
7499
7500	ump = ip->i_ump;
7501	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7502	    ("softdep_freefile called on non-softdep filesystem"));
7503	/*
7504	 * This sets up the inode de-allocation dependency.
7505	 */
7506	freefile = malloc(sizeof(struct freefile),
7507		M_FREEFILE, M_SOFTDEP_FLAGS);
7508	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7509	freefile->fx_mode = mode;
7510	freefile->fx_oldinum = ino;
7511	freefile->fx_devvp = ip->i_devvp;
7512	LIST_INIT(&freefile->fx_jwork);
7513	UFS_LOCK(ump);
7514	ip->i_fs->fs_pendinginodes += 1;
7515	UFS_UNLOCK(ump);
7516
7517	/*
7518	 * If the inodedep does not exist, then the zero'ed inode has
7519	 * been written to disk. If the allocated inode has never been
7520	 * written to disk, then the on-disk inode is zero'ed. In either
7521	 * case we can free the file immediately.  If the journal was
7522	 * canceled before being written the inode will never make it to
7523	 * disk and we must send the canceled journal entries to
7524	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7525	 * Any blocks waiting on the inode to write can be safely freed
7526	 * here as it will never be written.
7527	 */
7528	ACQUIRE_LOCK(ump);
7529	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7530	if (inodedep) {
7531		/*
7532		 * Clear out freeblks that no longer need to reference
7533		 * this inode.
7534		 */
7535		while ((freeblks =
7536		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7537			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7538			    fb_next);
7539			freeblks->fb_state &= ~ONDEPLIST;
7540		}
7541		/*
7542		 * Remove this inode from the unlinked list.
7543		 */
7544		if (inodedep->id_state & UNLINKED) {
7545			/*
7546			 * Save the journal work to be freed with the bitmap
7547			 * before we clear UNLINKED.  Otherwise it can be lost
7548			 * if the inode block is written.
7549			 */
7550			handle_bufwait(inodedep, &freefile->fx_jwork);
7551			clear_unlinked_inodedep(inodedep);
7552			/*
7553			 * Re-acquire inodedep as we've dropped the
7554			 * per-filesystem lock in clear_unlinked_inodedep().
7555			 */
7556			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7557		}
7558	}
7559	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7560		FREE_LOCK(ump);
7561		handle_workitem_freefile(freefile);
7562		return;
7563	}
7564	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7565		inodedep->id_state |= GOINGAWAY;
7566	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7567	FREE_LOCK(ump);
7568	if (ip->i_number == ino)
7569		ip->i_flag |= IN_MODIFIED;
7570}
7571
7572/*
7573 * Check to see if an inode has never been written to disk. If
7574 * so free the inodedep and return success, otherwise return failure.
7575 * This routine must be called with splbio interrupts blocked.
7576 *
7577 * If we still have a bitmap dependency, then the inode has never
7578 * been written to disk. Drop the dependency as it is no longer
7579 * necessary since the inode is being deallocated. We set the
7580 * ALLCOMPLETE flags since the bitmap now properly shows that the
7581 * inode is not allocated. Even if the inode is actively being
7582 * written, it has been rolled back to its zero'ed state, so we
7583 * are ensured that a zero inode is what is on the disk. For short
7584	 * are assured that a zero'ed inode is what is on the disk. For short
7585 * dependencies from the inode so that it can be freed immediately.
7586 */
7587static int
7588check_inode_unwritten(inodedep)
7589	struct inodedep *inodedep;
7590{
7591
7592	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7593
7594	if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7595	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7596	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7597	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7598	    !LIST_EMPTY(&inodedep->id_inowait) ||
7599	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7600	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7601	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7602	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7603	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7604	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7605	    inodedep->id_mkdiradd != NULL ||
7606	    inodedep->id_nlinkdelta != 0)
7607		return (0);
7608	/*
7609	 * Another process might be in initiate_write_inodeblock_ufs[12]
7610	 * trying to allocate memory without holding "Softdep Lock".
7611	 */
7612	if ((inodedep->id_state & IOSTARTED) != 0 &&
7613	    inodedep->id_savedino1 == NULL)
7614		return (0);
7615
7616	if (inodedep->id_state & ONDEPLIST)
7617		LIST_REMOVE(inodedep, id_deps);
7618	inodedep->id_state &= ~ONDEPLIST;
7619	inodedep->id_state |= ALLCOMPLETE;
7620	inodedep->id_bmsafemap = NULL;
7621	if (inodedep->id_state & ONWORKLIST)
7622		WORKLIST_REMOVE(&inodedep->id_list);
7623	if (inodedep->id_savedino1 != NULL) {
7624		free(inodedep->id_savedino1, M_SAVEDINO);
7625		inodedep->id_savedino1 = NULL;
7626	}
7627	if (free_inodedep(inodedep) == 0)
7628		panic("check_inode_unwritten: busy inode");
7629	return (1);
7630}
7631
7632static int
7633check_inodedep_free(inodedep)
7634	struct inodedep *inodedep;
7635{
7636
7637	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7638	if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7639	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7640	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7641	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7642	    !LIST_EMPTY(&inodedep->id_inowait) ||
7643	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7644	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7645	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7646	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7647	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7648	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7649	    inodedep->id_mkdiradd != NULL ||
7650	    inodedep->id_nlinkdelta != 0 ||
7651	    inodedep->id_savedino1 != NULL)
7652		return (0);
7653	return (1);
7654}
7655
7656/*
7657 * Try to free an inodedep structure. Return 1 if it could be freed.
7658 */
7659static int
7660free_inodedep(inodedep)
7661	struct inodedep *inodedep;
7662{
7663
7664	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7665	if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7666	    !check_inodedep_free(inodedep))
7667		return (0);
7668	if (inodedep->id_state & ONDEPLIST)
7669		LIST_REMOVE(inodedep, id_deps);
7670	LIST_REMOVE(inodedep, id_hash);
7671	WORKITEM_FREE(inodedep, D_INODEDEP);
7672	return (1);
7673}
7674
7675/*
7676 * Free the block referenced by a freework structure.  The parent freeblks
7677 * structure is released and completed when the final cg bitmap reaches
7678 * the disk.  This routine may be freeing a jnewblk which never made it to
7679 * disk in which case we do not have to wait as the operation is undone
7680 * in memory immediately.
7681 */
7682static void
7683freework_freeblock(freework)
7684	struct freework *freework;
7685{
7686	struct freeblks *freeblks;
7687	struct jnewblk *jnewblk;
7688	struct ufsmount *ump;
7689	struct workhead wkhd;
7690	struct fs *fs;
7691	int bsize;
7692	int needj;
7693
7694	ump = VFSTOUFS(freework->fw_list.wk_mp);
7695	LOCK_OWNED(ump);
7696	/*
7697	 * Handle partial truncate separately.
7698	 */
7699	if (freework->fw_indir) {
7700		complete_trunc_indir(freework);
7701		return;
7702	}
7703	freeblks = freework->fw_freeblks;
7704	fs = ump->um_fs;
7705	needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7706	bsize = lfragtosize(fs, freework->fw_frags);
7707	LIST_INIT(&wkhd);
7708	/*
7709	 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7710	 * on the indirblk hashtable; this prevents premature freeing.
7711	 */
7712	freework->fw_state |= DEPCOMPLETE;
7713	/*
7714	 * SUJ needs to wait for the segment referencing freed indirect
7715	 * blocks to expire so that we know the checker will not confuse
7716	 * a re-allocated indirect block with its old contents.
7717	 */
7718	if (needj && freework->fw_lbn <= -NDADDR)
7719		indirblk_insert(freework);
7720	/*
7721	 * If we are canceling an existing jnewblk pass it to the free
7722	 * routine; otherwise pass the freework, which will ultimately
7723	 * release the freeblks.  If we're not journaling, we can just
7724	 * free the freeblks immediately.
7725	 */
7726	jnewblk = freework->fw_jnewblk;
7727	if (jnewblk != NULL) {
7728		cancel_jnewblk(jnewblk, &wkhd);
7729		needj = 0;
7730	} else if (needj) {
7731		freework->fw_state |= DELAYEDFREE;
7732		freeblks->fb_cgwait++;
7733		WORKLIST_INSERT(&wkhd, &freework->fw_list);
7734	}
7735	FREE_LOCK(ump);
7736	freeblks_free(ump, freeblks, btodb(bsize));
7737	CTR4(KTR_SUJ,
7738	    "freework_freeblock: ino %d blkno %jd lbn %jd size %ld",
7739	    freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
7740	ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
7741	    freeblks->fb_inum, freeblks->fb_vtype, &wkhd);
7742	ACQUIRE_LOCK(ump);
7743	/*
7744	 * The jnewblk will be discarded and the bits in the map never
7745	 * made it to disk.  We can immediately free the freework.
7746	 */
7747	if (needj == 0)
7748		handle_written_freework(freework);
7749}
7750
7751/*
7752 * We enqueue freework items that need processing back on the freeblks and
7753 * add the freeblks to the worklist.  This makes it easier to find all work
7754 * required to flush a truncation in process_truncates().
7755 */
7756static void
7757freework_enqueue(freework)
7758	struct freework *freework;
7759{
7760	struct freeblks *freeblks;
7761
7762	freeblks = freework->fw_freeblks;
7763	if ((freework->fw_state & INPROGRESS) == 0)
7764		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
7765	if ((freeblks->fb_state &
7766	    (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
7767	    LIST_EMPTY(&freeblks->fb_jblkdephd))
7768		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7769}
7770
7771/*
7772 * Start, continue, or finish the process of freeing an indirect block tree.
7773 * The free operation may be paused at any point with fw_off containing the
7774 * offset to restart from.  This enables us to implement some flow control
7775 * for large truncates which may fan out and generate a huge number of
7776 * dependencies.
7777 */
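/*
 * For instance, a freework whose fw_off has advanced to NINDIR(fs) has
 * already processed every slot in its indirect block; only the block
 * itself remains to be released, which is handed to freework_freeblock()
 * below.
 */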
7778static void
7779handle_workitem_indirblk(freework)
7780	struct freework *freework;
7781{
7782	struct freeblks *freeblks;
7783	struct ufsmount *ump;
7784	struct fs *fs;
7785
7786	freeblks = freework->fw_freeblks;
7787	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7788	fs = ump->um_fs;
7789	if (freework->fw_state & DEPCOMPLETE) {
7790		handle_written_freework(freework);
7791		return;
7792	}
7793	if (freework->fw_off == NINDIR(fs)) {
7794		freework_freeblock(freework);
7795		return;
7796	}
7797	freework->fw_state |= INPROGRESS;
7798	FREE_LOCK(ump);
7799	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
7800	    freework->fw_lbn);
7801	ACQUIRE_LOCK(ump);
7802}
7803
7804/*
7805 * Called when a freework structure attached to a cg buf is written.  The
7806 * ref on either the parent or the freeblks structure is released and
7807 * the freeblks is added back to the worklist if there is more work to do.
7808 */
7809static void
7810handle_written_freework(freework)
7811	struct freework *freework;
7812{
7813	struct freeblks *freeblks;
7814	struct freework *parent;
7815
7816	freeblks = freework->fw_freeblks;
7817	parent = freework->fw_parent;
7818	if (freework->fw_state & DELAYEDFREE)
7819		freeblks->fb_cgwait--;
7820	freework->fw_state |= COMPLETE;
7821	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
7822		WORKITEM_FREE(freework, D_FREEWORK);
7823	if (parent) {
7824		if (--parent->fw_ref == 0)
7825			freework_enqueue(parent);
7826		return;
7827	}
7828	if (--freeblks->fb_ref != 0)
7829		return;
7830	if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
7831	    ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
7832		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7833}
7834
7835/*
7836 * This workitem routine performs the block de-allocation.
7837 * The workitem is added to the pending list after the updated
7838 * inode block has been written to disk.  As mentioned above,
7839 * checks regarding the number of blocks de-allocated (compared
7840 * to the number of blocks allocated for the file) are also
7841 * performed in this function.
7842 */
7843static int
7844handle_workitem_freeblocks(freeblks, flags)
7845	struct freeblks *freeblks;
7846	int flags;
7847{
7848	struct freework *freework;
7849	struct newblk *newblk;
7850	struct allocindir *aip;
7851	struct ufsmount *ump;
7852	struct worklist *wk;
7853
7854	KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
7855	    ("handle_workitem_freeblocks: Journal entries not written."));
7856	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7857	ACQUIRE_LOCK(ump);
7858	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
7859		WORKLIST_REMOVE(wk);
7860		switch (wk->wk_type) {
7861		case D_DIRREM:
7862			wk->wk_state |= COMPLETE;
7863			add_to_worklist(wk, 0);
7864			continue;
7865
7866		case D_ALLOCDIRECT:
7867			free_newblk(WK_NEWBLK(wk));
7868			continue;
7869
7870		case D_ALLOCINDIR:
7871			aip = WK_ALLOCINDIR(wk);
7872			freework = NULL;
7873			if (aip->ai_state & DELAYEDFREE) {
7874				FREE_LOCK(ump);
7875				freework = newfreework(ump, freeblks, NULL,
7876				    aip->ai_lbn, aip->ai_newblkno,
7877				    ump->um_fs->fs_frag, 0, 0);
7878				ACQUIRE_LOCK(ump);
7879			}
7880			newblk = WK_NEWBLK(wk);
7881			if (newblk->nb_jnewblk) {
7882				freework->fw_jnewblk = newblk->nb_jnewblk;
7883				newblk->nb_jnewblk->jn_dep = &freework->fw_list;
7884				newblk->nb_jnewblk = NULL;
7885			}
7886			free_newblk(newblk);
7887			continue;
7888
7889		case D_FREEWORK:
7890			freework = WK_FREEWORK(wk);
7891			if (freework->fw_lbn <= -NDADDR)
7892				handle_workitem_indirblk(freework);
7893			else
7894				freework_freeblock(freework);
7895			continue;
7896		default:
7897			panic("handle_workitem_freeblocks: Unknown type %s",
7898			    TYPENAME(wk->wk_type));
7899		}
7900	}
7901	if (freeblks->fb_ref != 0) {
7902		freeblks->fb_state &= ~INPROGRESS;
7903		wake_worklist(&freeblks->fb_list);
7904		freeblks = NULL;
7905	}
7906	FREE_LOCK(ump);
7907	if (freeblks)
7908		return handle_complete_freeblocks(freeblks, flags);
7909	return (0);
7910}
7911
7912/*
7913 * Handle completion of block free via truncate.  This allows fs_pendingblocks
7914 * to track the actual free block count more closely than if we only updated
7915 * it at the end.  We must be careful to handle cases where the block count
7916 * on free was incorrect.
7917 */
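/*
 * A sketch of the arithmetic: fb_chkcnt starts at -(blocks expected to
 * be freed), e.g. softdep_setup_freeblocks() sets it to -datablocks, so
 * "remain" below is the outstanding count.  With fb_chkcnt == -1024
 * (remain == 1024), freeing 256 blocks advances fb_chkcnt to -768 and
 * debits fs_pendingblocks by 256; a free beyond what remains is clamped
 * so the pending count is not over-debited.
 */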
7918static void
7919freeblks_free(ump, freeblks, blocks)
7920	struct ufsmount *ump;
7921	struct freeblks *freeblks;
7922	int blocks;
7923{
7924	struct fs *fs;
7925	ufs2_daddr_t remain;
7926
7927	UFS_LOCK(ump);
7928	remain = -freeblks->fb_chkcnt;
7929	freeblks->fb_chkcnt += blocks;
7930	if (remain > 0) {
7931		if (remain < blocks)
7932			blocks = remain;
7933		fs = ump->um_fs;
7934		fs->fs_pendingblocks -= blocks;
7935	}
7936	UFS_UNLOCK(ump);
7937}
7938
7939/*
7940 * Once all of the freework workitems are complete we can retire the
7941 * freeblocks dependency and any journal work awaiting completion.  This
7942 * cannot be called until all other dependencies are stable on disk.
7943 */
7944static int
7945handle_complete_freeblocks(freeblks, flags)
7946	struct freeblks *freeblks;
7947	int flags;
7948{
7949	struct inodedep *inodedep;
7950	struct inode *ip;
7951	struct vnode *vp;
7952	struct fs *fs;
7953	struct ufsmount *ump;
7954	ufs2_daddr_t spare;
7955
7956	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7957	fs = ump->um_fs;
7958	flags = LK_EXCLUSIVE | flags;
7959	spare = freeblks->fb_chkcnt;
7960
7961	/*
7962	 * If we did not release the expected number of blocks we may have
7963	 * to adjust the inode block count here.  Only do so if it wasn't
7964	 * a truncation to zero and the modrev still matches.
7965	 */
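	/*
	 * After all freework completes, fb_chkcnt holds the signed
	 * difference between the blocks we expected to release and those
	 * actually released (see freeblks_free()); "spare" carries that
	 * discrepancy into the adjustments below.
	 */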
7966	if (spare && freeblks->fb_len != 0) {
7967		if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7968		    flags, &vp, FFSV_FORCEINSMQ) != 0)
7969			return (EBUSY);
7970		ip = VTOI(vp);
7971		if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
7972			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
7973			ip->i_flag |= IN_CHANGE;
7974			/*
7975			 * We must wait so this happens before the
7976			 * journal is reclaimed.
7977			 */
7978			ffs_update(vp, 1);
7979		}
7980		vput(vp);
7981	}
7982	if (spare < 0) {
7983		UFS_LOCK(ump);
7984		fs->fs_pendingblocks += spare;
7985		UFS_UNLOCK(ump);
7986	}
7987#ifdef QUOTA
7988	/* Handle spare. */
7989	if (spare)
7990		quotaadj(freeblks->fb_quota, ump, -spare);
7991	quotarele(freeblks->fb_quota);
7992#endif
7993	ACQUIRE_LOCK(ump);
7994	if (freeblks->fb_state & ONDEPLIST) {
7995		inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7996		    0, &inodedep);
7997		TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
7998		freeblks->fb_state &= ~ONDEPLIST;
7999		if (TAILQ_EMPTY(&inodedep->id_freeblklst))
8000			free_inodedep(inodedep);
8001	}
8002	/*
8003	 * All of the freeblock deps must be complete prior to this call
8004	 * so it's now safe to complete earlier outstanding journal entries.
8005	 */
8006	handle_jwork(&freeblks->fb_jwork);
8007	WORKITEM_FREE(freeblks, D_FREEBLKS);
8008	FREE_LOCK(ump);
8009	return (0);
8010}
8011
8012/*
8013 * Release blocks associated with the freeblks and stored in the indirect
8014 * block dbn. If level is greater than SINGLE, the block is an indirect block
8015 * and recursive calls to indirtrunc must be used to cleanse other indirect
8016 * and recursive calls to indir_trunc must be used to cleanse other indirect
8017 *
8018 * This handles partial and complete truncation of blocks.  Partial is noted
8019 * with goingaway == 0.  In this case the freework is completed after the
8020 * zero'd indirects are written to disk.  For full truncation the freework
8021 * is completed after the block is freed.
8022 */
8023static void
8024indir_trunc(freework, dbn, lbn)
8025	struct freework *freework;
8026	ufs2_daddr_t dbn;
8027	ufs_lbn_t lbn;
8028{
8029	struct freework *nfreework;
8030	struct workhead wkhd;
8031	struct freeblks *freeblks;
8032	struct buf *bp;
8033	struct fs *fs;
8034	struct indirdep *indirdep;
8035	struct ufsmount *ump;
8036	ufs1_daddr_t *bap1 = 0;
8037	ufs2_daddr_t nb, nnb, *bap2 = 0;
8038	ufs_lbn_t lbnadd, nlbn;
8039	int i, nblocks, ufs1fmt;
8040	int freedblocks;
8041	int goingaway;
8042	int freedeps;
8043	int needj;
8044	int level;
8045	int cnt;
8046
8047	freeblks = freework->fw_freeblks;
8048	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8049	fs = ump->um_fs;
8050	/*
8051	 * Get buffer of block pointers to be freed.  There are three cases:
8052	 *
8053	 * 1) Partial truncate caches the indirdep pointer in the freework
8054	 *    which gives us a link back to the saved bp that holds the
8055	 *    pointers we want to clear.  When this completes the zero
8056	 *    pointers are written to the real copy.
8057	 * 2) The indirect is being completely truncated, cancel_indirdep()
8058	 *    eliminated the real copy and placed the indirdep on the saved
8059	 *    copy.  The indirdep and buf are discarded when this completes.
8060	 * 3) The indirect was not in memory, we read a copy off of the disk
8061	 *    using the devvp and drop and invalidate the buffer when we're
8062	 *    done.
8063	 */
8064	goingaway = 1;
8065	indirdep = NULL;
8066	if (freework->fw_indir != NULL) {
8067		goingaway = 0;
8068		indirdep = freework->fw_indir;
8069		bp = indirdep->ir_savebp;
8070		if (bp == NULL || bp->b_blkno != dbn)
8071			panic("indir_trunc: Bad saved buf %p blkno %jd",
8072			    bp, (intmax_t)dbn);
8073	} else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
8074		/*
8075		 * The lock prevents the buf dep list from changing and
8076		 * indirects on devvp should only ever have one dependency.
8077		 */
8078		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
8079		if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
8080			panic("indir_trunc: Bad indirdep %p from buf %p",
8081			    indirdep, bp);
8082	} else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
8083	    NOCRED, &bp) != 0) {
8084		brelse(bp);
8085		return;
8086	}
8087	ACQUIRE_LOCK(ump);
8088	/* Protects against a race with complete_trunc_indir(). */
8089	freework->fw_state &= ~INPROGRESS;
8090	/*
8091	 * If we have an indirdep we need to enforce the truncation order
8092	 * and discard it when it is complete.
8093	 */
8094	if (indirdep) {
8095		if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
8096		    !TAILQ_EMPTY(&indirdep->ir_trunc)) {
8097			/*
8098			 * Add the complete truncate to the list on the
8099			 * indirdep to enforce in-order processing.
8100			 */
8101			if (freework->fw_indir == NULL)
8102				TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
8103				    freework, fw_next);
8104			FREE_LOCK(ump);
8105			return;
8106		}
8107		/*
8108		 * If we're goingaway, free the indirdep.  Otherwise it will
8109		 * linger until the write completes.
8110		 */
8111		if (goingaway)
8112			free_indirdep(indirdep);
8113	}
8114	FREE_LOCK(ump);
8115	/* Initialize pointers depending on the inode format (UFS1 or UFS2). */
8116	if (ump->um_fstype == UFS1) {
8117		bap1 = (ufs1_daddr_t *)bp->b_data;
8118		nb = bap1[freework->fw_off];
8119		ufs1fmt = 1;
8120	} else {
8121		bap2 = (ufs2_daddr_t *)bp->b_data;
8122		nb = bap2[freework->fw_off];
8123		ufs1fmt = 0;
8124	}
8125	level = lbn_level(lbn);
8126	needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
8127	lbnadd = lbn_offset(fs, level);
8128	nblocks = btodb(fs->fs_bsize);
8129	nfreework = freework;
8130	freedeps = 0;
8131	cnt = 0;
8132	/*
8133	 * Reclaim blocks.  Traverses into nested indirect levels and
8134	 * arranges, when journaling, for the current level to be freed
8135	 * once its subordinates are free.
8136	 */
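	/*
	 * Child lbn arithmetic: slot i one level down lives at
	 * nlbn = (lbn + 1) - i * lbnadd.  For example, assuming
	 * NINDIR(fs) == 2048, a double indirect at lbn -2061 has
	 * single-indirect children at lbns -2060, -4108, -6156, ...
	 */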
8137	for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
8138		if (i != NINDIR(fs) - 1) {
8139			if (ufs1fmt)
8140				nnb = bap1[i+1];
8141			else
8142				nnb = bap2[i+1];
8143		} else
8144			nnb = 0;
8145		if (nb == 0)
8146			continue;
8147		cnt++;
8148		if (level != 0) {
8149			nlbn = (lbn + 1) - (i * lbnadd);
8150			if (needj != 0) {
8151				nfreework = newfreework(ump, freeblks, freework,
8152				    nlbn, nb, fs->fs_frag, 0, 0);
8153				freedeps++;
8154			}
8155			indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
8156		} else {
8157			struct freedep *freedep;
8158
8159			/*
8160			 * Attempt to aggregate freedep dependencies for
8161			 * all blocks being released to the same CG.
8162			 */
8163			LIST_INIT(&wkhd);
8164			if (needj != 0 &&
8165			    (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
8166				freedep = newfreedep(freework);
8167				WORKLIST_INSERT_UNLOCKED(&wkhd,
8168				    &freedep->fd_list);
8169				freedeps++;
8170			}
8171			CTR3(KTR_SUJ,
8172			    "indir_trunc: ino %d blkno %jd size %ld",
8173			    freeblks->fb_inum, nb, fs->fs_bsize);
8174			ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
8175			    fs->fs_bsize, freeblks->fb_inum,
8176			    freeblks->fb_vtype, &wkhd);
8177		}
8178	}
8179	if (goingaway) {
8180		bp->b_flags |= B_INVAL | B_NOCACHE;
8181		brelse(bp);
8182	}
8183	freedblocks = 0;
8184	if (level == 0)
8185		freedblocks = (nblocks * cnt);
8186	if (needj == 0)
8187		freedblocks += nblocks;
8188	freeblks_free(ump, freeblks, freedblocks);
8189	/*
8190	 * If we are journaling set up the ref counts and offset so this
8191	 * indirect can be completed when its children are free.
8192	 */
8193	if (needj) {
8194		ACQUIRE_LOCK(ump);
8195		freework->fw_off = i;
8196		freework->fw_ref += freedeps;
8197		freework->fw_ref -= NINDIR(fs) + 1;
8198		if (level == 0)
8199			freeblks->fb_cgwait += freedeps;
8200		if (freework->fw_ref == 0)
8201			freework_freeblock(freework);
8202		FREE_LOCK(ump);
8203		return;
8204	}
8205	/*
8206	 * If we're not journaling we can free the indirect now.
8207	 */
8208	dbn = dbtofsb(fs, dbn);
8209	CTR3(KTR_SUJ,
8210	    "indir_trunc 2: ino %d blkno %jd size %ld",
8211	    freeblks->fb_inum, dbn, fs->fs_bsize);
8212	ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8213	    freeblks->fb_inum, freeblks->fb_vtype, NULL);
8214	/* Non-SUJ softdep does single-threaded truncations. */
8215	if (freework->fw_blkno == dbn) {
8216		freework->fw_state |= ALLCOMPLETE;
8217		ACQUIRE_LOCK(ump);
8218		handle_written_freework(freework);
8219		FREE_LOCK(ump);
8220	}
8221	return;
8222}
8223
8224/*
8225 * Cancel an allocindir when it is removed via truncation.  When bp is not
8226 * NULL the indirect never appeared on disk and is scheduled to be freed
8227 * independently of the indir so we can more easily track journal work.
8228 */
8229static void
8230cancel_allocindir(aip, bp, freeblks, trunc)
8231	struct allocindir *aip;
8232	struct buf *bp;
8233	struct freeblks *freeblks;
8234	int trunc;
8235{
8236	struct indirdep *indirdep;
8237	struct freefrag *freefrag;
8238	struct newblk *newblk;
8239
8240	newblk = (struct newblk *)aip;
8241	LIST_REMOVE(aip, ai_next);
8242	/*
8243	 * We must eliminate the pointer in bp if it must be freed on its
8244	 * own due to partial truncate or pending journal work.
8245	 */
8246	if (bp && (trunc || newblk->nb_jnewblk)) {
8247		/*
8248		 * Clear the pointer and mark the aip to be freed
8249		 * directly if it never existed on disk.
8250		 */
8251		aip->ai_state |= DELAYEDFREE;
8252		indirdep = aip->ai_indirdep;
8253		if (indirdep->ir_state & UFS1FMT)
8254			((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8255		else
8256			((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8257	}
8258	/*
8259	 * When truncating the previous pointer will be freed via
8260	 * savedbp.  Eliminate the freefrag, which would duplicate that free.
8261	 */
8262	if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8263		newblk->nb_freefrag = NULL;
8264		if (freefrag->ff_jdep)
8265			cancel_jfreefrag(
8266			    WK_JFREEFRAG(freefrag->ff_jdep));
8267		jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8268		WORKITEM_FREE(freefrag, D_FREEFRAG);
8269	}
8270	/*
8271	 * If the journal hasn't been written the jnewblk must be passed
8272	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
8273	 * this by leaving the journal dependency on the newblk to be freed
8274	 * when a freework is created in handle_workitem_freeblocks().
8275	 */
8276	cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8277	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8278}
8279
8280/*
8281 * Create the mkdir dependencies for . and .. in a new directory.  Link them
8282 * in to a newdirblk so any subsequent additions are tracked properly.  The
8283 * caller is responsible for adding the mkdir1 dependency to the journal
8284 * and updating id_mkdiradd.  This function returns with the per-filesystem
8285 * lock held.
8286 */
8287static struct mkdir *
8288setup_newdir(dap, newinum, dinum, newdirbp, mkdirp)
8289	struct diradd *dap;
8290	ino_t newinum;
8291	ino_t dinum;
8292	struct buf *newdirbp;
8293	struct mkdir **mkdirp;
8294{
8295	struct newblk *newblk;
8296	struct pagedep *pagedep;
8297	struct inodedep *inodedep;
8298	struct newdirblk *newdirblk = 0;
8299	struct mkdir *mkdir1, *mkdir2;
8300	struct worklist *wk;
8301	struct jaddref *jaddref;
8302	struct ufsmount *ump;
8303	struct mount *mp;
8304
8305	mp = dap->da_list.wk_mp;
8306	ump = VFSTOUFS(mp);
8307	newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8308	    M_SOFTDEP_FLAGS);
8309	workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8310	LIST_INIT(&newdirblk->db_mkdir);
8311	mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8312	workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8313	mkdir1->md_state = ATTACHED | MKDIR_BODY;
8314	mkdir1->md_diradd = dap;
8315	mkdir1->md_jaddref = NULL;
8316	mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8317	workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8318	mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8319	mkdir2->md_diradd = dap;
8320	mkdir2->md_jaddref = NULL;
8321	if (MOUNTEDSUJ(mp) == 0) {
8322		mkdir1->md_state |= DEPCOMPLETE;
8323		mkdir2->md_state |= DEPCOMPLETE;
8324	}
8325	/*
8326	 * Dependency on "." and ".." being written to disk.
8327	 */
8328	mkdir1->md_buf = newdirbp;
8329	ACQUIRE_LOCK(VFSTOUFS(mp));
8330	LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs);
8331	/*
8332	 * We must link the pagedep, allocdirect, and newdirblk for
8333	 * the initial file page so the pointer to the new directory
8334	 * is not written until the directory contents are live and
8335	 * any subsequent additions are not marked live until the
8336	 * block is reachable via the inode.
8337	 */
8338	if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8339		panic("setup_newdir: lost pagedep");
8340	LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8341		if (wk->wk_type == D_ALLOCDIRECT)
8342			break;
8343	if (wk == NULL)
8344		panic("setup_newdir: lost allocdirect");
8345	if (pagedep->pd_state & NEWBLOCK)
8346		panic("setup_newdir: NEWBLOCK already set");
8347	newblk = WK_NEWBLK(wk);
8348	pagedep->pd_state |= NEWBLOCK;
8349	pagedep->pd_newdirblk = newdirblk;
8350	newdirblk->db_pagedep = pagedep;
8351	WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8352	WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8353	/*
8354	 * Look up the inodedep for the parent directory so that we
8355	 * can link mkdir2 into the pending dotdot jaddref or
8356	 * the inode write if there is none.  If the inode is
8357	 * ALLCOMPLETE and no jaddref is present all dependencies have
8358	 * been satisfied and mkdir2 can be freed.
8359	 */
8360	inodedep_lookup(mp, dinum, 0, &inodedep);
8361	if (MOUNTEDSUJ(mp)) {
8362		if (inodedep == NULL)
8363			panic("setup_newdir: Lost parent.");
8364		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8365		    inoreflst);
8366		KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8367		    (jaddref->ja_state & MKDIR_PARENT),
8368		    ("setup_newdir: bad dotdot jaddref %p", jaddref));
8369		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8370		mkdir2->md_jaddref = jaddref;
8371		jaddref->ja_mkdir = mkdir2;
8372	} else if (inodedep == NULL ||
8373	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8374		dap->da_state &= ~MKDIR_PARENT;
8375		WORKITEM_FREE(mkdir2, D_MKDIR);
8376		mkdir2 = NULL;
8377	} else {
8378		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8379		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8380	}
8381	*mkdirp = mkdir2;
8382
8383	return (mkdir1);
8384}
8385
8386/*
8387 * Directory entry addition dependencies.
8388 *
8389 * When adding a new directory entry, the inode (with its incremented link
8390 * count) must be written to disk before the directory entry's pointer to it.
8391 * Also, if the inode is newly allocated, the corresponding freemap must be
8392 * updated (on disk) before the directory entry's pointer. These requirements
8393 * are met via undo/redo on the directory entry's pointer, which consists
8394 * simply of the inode number.
8395 *
8396 * As directory entries are added and deleted, the free space within a
8397 * directory block can become fragmented.  The ufs filesystem will compact
8398 * a fragmented directory block to make space for a new entry. When this
8399 * occurs, the offsets of previously added entries change. Any "diradd"
8400 * dependency structures corresponding to these entries must be updated with
8401 * the new offsets.
8402 */
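/*
 * In practice the undo/redo works as follows: when a directory block is
 * written while a diradd is incomplete, initiate_write_filepage() rolls
 * the entry's d_ino back (to zero, or to the prior inode for a change)
 * so the on-disk directory never names an uninitialized inode; once the
 * dependencies are satisfied the saved number is rolled forward and the
 * block is redirtied.
 */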
8403
8404/*
8405 * This routine is called after the in-memory inode's link
8406 * count has been incremented, but before the directory entry's
8407 * pointer to the inode has been set.
8408 */
8409int
8410softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
8411	struct buf *bp;		/* buffer containing directory block */
8412	struct inode *dp;	/* inode for directory */
8413	off_t diroffset;	/* offset of new entry in directory */
8414	ino_t newinum;		/* inode referenced by new directory entry */
8415	struct buf *newdirbp;	/* non-NULL => contents of new mkdir */
8416	int isnewblk;		/* entry is in a newly allocated block */
8417{
8418	int offset;		/* offset of new entry within directory block */
8419	ufs_lbn_t lbn;		/* block in directory containing new entry */
8420	struct fs *fs;
8421	struct diradd *dap;
8422	struct newblk *newblk;
8423	struct pagedep *pagedep;
8424	struct inodedep *inodedep;
8425	struct newdirblk *newdirblk = 0;
8426	struct mkdir *mkdir1, *mkdir2;
8427	struct jaddref *jaddref;
8428	struct ufsmount *ump;
8429	struct mount *mp;
8430	int isindir;
8431
8432	ump = dp->i_ump;
8433	mp = UFSTOVFS(ump);
8434	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8435	    ("softdep_setup_directory_add called on non-softdep filesystem"));
8436	/*
8437	 * Whiteouts have no dependencies.
8438	 */
8439	if (newinum == WINO) {
8440		if (newdirbp != NULL)
8441			bdwrite(newdirbp);
8442		return (0);
8443	}
8444	jaddref = NULL;
8445	mkdir1 = mkdir2 = NULL;
8446	fs = dp->i_fs;
8447	lbn = lblkno(fs, diroffset);
8448	offset = blkoff(fs, diroffset);
8449	dap = malloc(sizeof(struct diradd), M_DIRADD,
8450		M_SOFTDEP_FLAGS|M_ZERO);
8451	workitem_alloc(&dap->da_list, D_DIRADD, mp);
8452	dap->da_offset = offset;
8453	dap->da_newinum = newinum;
8454	dap->da_state = ATTACHED;
8455	LIST_INIT(&dap->da_jwork);
8456	isindir = bp->b_lblkno >= NDADDR;
8457	if (isnewblk &&
8458	    (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8459		newdirblk = malloc(sizeof(struct newdirblk),
8460		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8461		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8462		LIST_INIT(&newdirblk->db_mkdir);
8463	}
8464	/*
8465	 * If we're creating a new directory, set up the dependencies and set
8466	 * the dap state to wait for them.  Otherwise it's COMPLETE and
8467	 * we can move on.
8468	 */
8469	if (newdirbp == NULL) {
8470		dap->da_state |= DEPCOMPLETE;
8471		ACQUIRE_LOCK(ump);
8472	} else {
8473		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8474		mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8475		    &mkdir2);
8476	}
8477	/*
8478	 * Link into parent directory pagedep to await its being written.
8479	 */
8480	pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8481#ifdef DEBUG
8482	if (diradd_lookup(pagedep, offset) != NULL)
8483		panic("softdep_setup_directory_add: %p already at off %d\n",
8484		    diradd_lookup(pagedep, offset), offset);
8485#endif
8486	dap->da_pagedep = pagedep;
8487	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8488	    da_pdlist);
8489	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
8490	/*
8491	 * If we're journaling, link the diradd into the jaddref so it
8492	 * may be completed after the journal entry is written.  Otherwise,
8493	 * link the diradd into its inodedep.  If the inode is not yet
8494	 * written place it on the bufwait list, otherwise do the post-inode
8495	 * write processing to put it on the id_pendinghd list.
8496	 */
8497	if (MOUNTEDSUJ(mp)) {
8498		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8499		    inoreflst);
8500		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8501		    ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8502		jaddref->ja_diroff = diroffset;
8503		jaddref->ja_diradd = dap;
8504		add_to_journal(&jaddref->ja_list);
8505	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8506		diradd_inode_written(dap, inodedep);
8507	else
8508		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8509	/*
8510	 * Add the journal entries for . and .. links now that the primary
8511	 * link is written.
8512	 */
8513	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8514		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8515		    inoreflst, if_deps);
8516		KASSERT(jaddref != NULL &&
8517		    jaddref->ja_ino == jaddref->ja_parent &&
8518		    (jaddref->ja_state & MKDIR_BODY),
8519		    ("softdep_setup_directory_add: bad dot jaddref %p",
8520		    jaddref));
8521		mkdir1->md_jaddref = jaddref;
8522		jaddref->ja_mkdir = mkdir1;
8523		/*
8524		 * It is important that the dotdot journal entry
8525		 * is added prior to the dot entry since dot writes
8526		 * both the dot and dotdot links.  These both must
8527		 * be added after the primary link for the journal
8528		 * to remain consistent.
8529		 */
8530		add_to_journal(&mkdir2->md_jaddref->ja_list);
8531		add_to_journal(&jaddref->ja_list);
8532	}
8533	/*
8534	 * If we are adding a new directory remember this diradd so that if
8535	 * we rename it we can keep the dot and dotdot dependencies.  If
8536	 * we are adding a new name for an inode that has a mkdiradd we
8537	 * must be in rename and we have to move the dot and dotdot
8538	 * dependencies to this new name.  The old name is being orphaned
8539	 * soon.
8540	 */
8541	if (mkdir1 != NULL) {
8542		if (inodedep->id_mkdiradd != NULL)
8543			panic("softdep_setup_directory_add: Existing mkdir");
8544		inodedep->id_mkdiradd = dap;
8545	} else if (inodedep->id_mkdiradd)
8546		merge_diradd(inodedep, dap);
8547	if (newdirblk) {
8548		/*
8549		 * There is nothing to do if we are already tracking
8550		 * this block.
8551		 */
8552		if ((pagedep->pd_state & NEWBLOCK) != 0) {
8553			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8554			FREE_LOCK(ump);
8555			return (0);
8556		}
8557		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8558		    == 0)
8559			panic("softdep_setup_directory_add: lost entry");
8560		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8561		pagedep->pd_state |= NEWBLOCK;
8562		pagedep->pd_newdirblk = newdirblk;
8563		newdirblk->db_pagedep = pagedep;
8564		FREE_LOCK(ump);
8565		/*
8566		 * If we extended into an indirect, signal direnter to sync.
8567		 */
8568		if (isindir)
8569			return (1);
8570		return (0);
8571	}
8572	FREE_LOCK(ump);
8573	return (0);
8574}
8575
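/*
 * Illustrative caller fragment (kept out of the build by #if 0): the
 * non-zero return above signals that the new entry extended the
 * directory into an indirect block, which the caller must handle by
 * forcing a synchronous directory update.  Marking the inode with
 * IN_NEEDSYNC is one plausible reaction, modeled on ufs_direnter();
 * treat the exact caller-side names here as assumptions.
 */
#if 0
	if (softdep_setup_directory_add(bp, dp, dp->i_offset,
	    dirp->d_ino, newdirbp, isnewblk))
		dp->i_flag |= IN_NEEDSYNC;	/* entry hit an indirect */
	bdwrite(bp);
#endif
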
8576/*
8577 * This procedure is called to change the offset of a directory
8578 * entry when compacting a directory block which must be owned
8579 * exclusively by the caller. Note that the actual entry movement
8580 * must be done in this procedure to ensure that no I/O completions
8581 * occur while the move is in progress.
8582 */
8583void
8584softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
8585	struct buf *bp;		/* Buffer holding directory block. */
8586	struct inode *dp;	/* inode for directory */
8587	caddr_t base;		/* address of dp->i_offset */
8588	caddr_t oldloc;		/* address of old directory location */
8589	caddr_t newloc;		/* address of new directory location */
8590	int entrysize;		/* size of directory entry */
8591{
8592	int offset, oldoffset, newoffset;
8593	struct pagedep *pagedep;
8594	struct jmvref *jmvref;
8595	struct diradd *dap;
8596	struct direct *de;
8597	struct mount *mp;
8598	ufs_lbn_t lbn;
8599	int flags;
8600
8601	mp = UFSTOVFS(dp->i_ump);
8602	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8603	    ("softdep_change_directoryentry_offset called on "
8604	     "non-softdep filesystem"));
8605	de = (struct direct *)oldloc;
8606	jmvref = NULL;
8607	flags = 0;
8608	/*
8609	 * Moves are always journaled as it would be too complex to
8610	 * determine if any affected adds or removes are present in the
8611	 * journal.
8612	 */
8613	if (MOUNTEDSUJ(mp)) {
8614		flags = DEPALLOC;
8615		jmvref = newjmvref(dp, de->d_ino,
8616		    dp->i_offset + (oldloc - base),
8617		    dp->i_offset + (newloc - base));
8618	}
8619	lbn = lblkno(dp->i_fs, dp->i_offset);
8620	offset = blkoff(dp->i_fs, dp->i_offset);
8621	oldoffset = offset + (oldloc - base);
8622	newoffset = offset + (newloc - base);
8623	ACQUIRE_LOCK(dp->i_ump);
8624	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8625		goto done;
8626	dap = diradd_lookup(pagedep, oldoffset);
8627	if (dap) {
8628		dap->da_offset = newoffset;
8629		newoffset = DIRADDHASH(newoffset);
8630		oldoffset = DIRADDHASH(oldoffset);
8631		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8632		    newoffset != oldoffset) {
8633			LIST_REMOVE(dap, da_pdlist);
8634			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8635			    dap, da_pdlist);
8636		}
8637	}
8638done:
8639	if (jmvref) {
8640		jmvref->jm_pagedep = pagedep;
8641		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8642		add_to_journal(&jmvref->jm_list);
8643	}
8644	bcopy(oldloc, newloc, entrysize);
8645	FREE_LOCK(dp->i_ump);
8646}
8647
8648/*
8649 * Move the mkdir dependencies and journal work from one diradd to another
8650 * when renaming a directory.  The new name must depend on the mkdir deps
8651 * completing as the old name did.  Directories can only have one valid link
8652 * at a time so one must be canonical.
8653 */
8654static void
8655merge_diradd(inodedep, newdap)
8656	struct inodedep *inodedep;
8657	struct diradd *newdap;
8658{
8659	struct diradd *olddap;
8660	struct mkdir *mkdir, *nextmd;
8661	struct ufsmount *ump;
8662	short state;
8663
8664	olddap = inodedep->id_mkdiradd;
8665	inodedep->id_mkdiradd = newdap;
8666	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8667		newdap->da_state &= ~DEPCOMPLETE;
8668		ump = VFSTOUFS(inodedep->id_list.wk_mp);
8669		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8670		     mkdir = nextmd) {
8671			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8672			if (mkdir->md_diradd != olddap)
8673				continue;
8674			mkdir->md_diradd = newdap;
8675			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8676			newdap->da_state |= state;
8677			olddap->da_state &= ~state;
8678			if ((olddap->da_state &
8679			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8680				break;
8681		}
8682		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8683			panic("merge_diradd: unfound ref");
8684	}
8685	/*
8686	 * Any mkdir-related journal items are not safe to be freed until
8687	 * the new name is stable.
8688	 */
8689	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8690	olddap->da_state |= DEPCOMPLETE;
8691	complete_diradd(olddap);
8692}
8693
8694/*
8695 * Move the diradd to the pending list when all diradd dependencies are
8696 * complete.
8697 */
8698static void
8699complete_diradd(dap)
8700	struct diradd *dap;
8701{
8702	struct pagedep *pagedep;
8703
8704	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8705		if (dap->da_state & DIRCHG)
8706			pagedep = dap->da_previous->dm_pagedep;
8707		else
8708			pagedep = dap->da_pagedep;
8709		LIST_REMOVE(dap, da_pdlist);
8710		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8711	}
8712}
8713
8714/*
8715 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
8716 * add entries and conditionally journal the remove.
8717 */
8718static void
8719cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8720	struct diradd *dap;
8721	struct dirrem *dirrem;
8722	struct jremref *jremref;
8723	struct jremref *dotremref;
8724	struct jremref *dotdotremref;
8725{
8726	struct inodedep *inodedep;
8727	struct jaddref *jaddref;
8728	struct inoref *inoref;
8729	struct ufsmount *ump;
8730	struct mkdir *mkdir;
8731
8732	/*
8733	 * If no remove references were allocated, we're on a non-journaled
8734	 * filesystem and can skip the cancel step.
8735	 */
8736	if (jremref == NULL) {
8737		free_diradd(dap, NULL);
8738		return;
8739	}
8740	/*
8741	 * Cancel the primary name and free it if it does not require
8742	 * journaling.
8743	 */
8744	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8745	    0, &inodedep) != 0) {
8746		/* Abort the addref that references this diradd.  */
8747		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8748			if (inoref->if_list.wk_type != D_JADDREF)
8749				continue;
8750			jaddref = (struct jaddref *)inoref;
8751			if (jaddref->ja_diradd != dap)
8752				continue;
8753			if (cancel_jaddref(jaddref, inodedep,
8754			    &dirrem->dm_jwork) == 0) {
8755				free_jremref(jremref);
8756				jremref = NULL;
8757			}
8758			break;
8759		}
8760	}
8761	/*
8762	 * Cancel subordinate names and free them if they do not require
8763	 * journaling.
8764	 */
8765	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8766		ump = VFSTOUFS(dap->da_list.wk_mp);
8767		LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
8768			if (mkdir->md_diradd != dap)
8769				continue;
8770			if ((jaddref = mkdir->md_jaddref) == NULL)
8771				continue;
8772			mkdir->md_jaddref = NULL;
8773			if (mkdir->md_state & MKDIR_PARENT) {
8774				if (cancel_jaddref(jaddref, NULL,
8775				    &dirrem->dm_jwork) == 0) {
8776					free_jremref(dotdotremref);
8777					dotdotremref = NULL;
8778				}
8779			} else {
8780				if (cancel_jaddref(jaddref, inodedep,
8781				    &dirrem->dm_jwork) == 0) {
8782					free_jremref(dotremref);
8783					dotremref = NULL;
8784				}
8785			}
8786		}
8787	}
8788
8789	if (jremref)
8790		journal_jremref(dirrem, jremref, inodedep);
8791	if (dotremref)
8792		journal_jremref(dirrem, dotremref, inodedep);
8793	if (dotdotremref)
8794		journal_jremref(dirrem, dotdotremref, NULL);
8795	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8796	free_diradd(dap, &dirrem->dm_jwork);
8797}
8798
8799/*
8800 * Free a diradd dependency structure. This routine must be called
8801 * with the per-filesystem soft updates lock held.
8802 */
8803static void
8804free_diradd(dap, wkhd)
8805	struct diradd *dap;
8806	struct workhead *wkhd;
8807{
8808	struct dirrem *dirrem;
8809	struct pagedep *pagedep;
8810	struct inodedep *inodedep;
8811	struct mkdir *mkdir, *nextmd;
8812	struct ufsmount *ump;
8813
8814	ump = VFSTOUFS(dap->da_list.wk_mp);
8815	LOCK_OWNED(ump);
8816	LIST_REMOVE(dap, da_pdlist);
8817	if (dap->da_state & ONWORKLIST)
8818		WORKLIST_REMOVE(&dap->da_list);
8819	if ((dap->da_state & DIRCHG) == 0) {
8820		pagedep = dap->da_pagedep;
8821	} else {
8822		dirrem = dap->da_previous;
8823		pagedep = dirrem->dm_pagedep;
8824		dirrem->dm_dirinum = pagedep->pd_ino;
8825		dirrem->dm_state |= COMPLETE;
8826		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
8827			add_to_worklist(&dirrem->dm_list, 0);
8828	}
8829	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
8830	    0, &inodedep) != 0)
8831		if (inodedep->id_mkdiradd == dap)
8832			inodedep->id_mkdiradd = NULL;
8833	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8834		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8835		     mkdir = nextmd) {
8836			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8837			if (mkdir->md_diradd != dap)
8838				continue;
8839			dap->da_state &=
8840			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
8841			LIST_REMOVE(mkdir, md_mkdirs);
8842			if (mkdir->md_state & ONWORKLIST)
8843				WORKLIST_REMOVE(&mkdir->md_list);
8844			if (mkdir->md_jaddref != NULL)
8845				panic("free_diradd: Unexpected jaddref");
8846			WORKITEM_FREE(mkdir, D_MKDIR);
8847			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
8848				break;
8849		}
8850		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8851			panic("free_diradd: unfound ref");
8852	}
8853	if (inodedep)
8854		free_inodedep(inodedep);
8855	/*
8856	 * Free any journal segments waiting for the directory write.
8857	 */
8858	handle_jwork(&dap->da_jwork);
8859	WORKITEM_FREE(dap, D_DIRADD);
8860}
8861
8862/*
8863 * Directory entry removal dependencies.
8864 *
8865 * When removing a directory entry, the entry's inode pointer must be
8866 * zero'ed on disk before the corresponding inode's link count is decremented
8867 * (possibly freeing the inode for re-use). This dependency is handled by
8868 * updating the directory entry but delaying the inode count reduction until
8869 * after the directory block has been written to disk. After this point, the
8870 * inode count can be decremented whenever it is convenient.
8871 */
8872
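/*
 * Minimal standalone model (excluded from the build) of the ordering
 * rule described above: the zeroed directory entry must reach the disk
 * before the inode's link count is decremented.  The types and the
 * model_commit_to_disk() stub are illustrative stand-ins, not kernel
 * interfaces.
 */
#if 0
struct model_direct { unsigned d_ino; };
struct model_inode { int nlink; };

static void model_commit_to_disk(void *p) { (void)p; /* stand-in */ }

static void
model_remove_entry(struct model_direct *ep, struct model_inode *ip)
{

	ep->d_ino = 0;			/* 1: clear the on-disk pointer  */
	model_commit_to_disk(ep);	/* 2: ...and let it reach disk   */
	ip->nlink--;			/* 3: only now drop the link cnt */
}
#endif
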
8873/*
8874 * This routine should be called immediately after removing
8875 * a directory entry.  The inode's link count should not be
8876 * decremented by the calling procedure -- the soft updates
8877 * code will do this task when it is safe.
8878 */
8879void
8880softdep_setup_remove(bp, dp, ip, isrmdir)
8881	struct buf *bp;		/* buffer containing directory block */
8882	struct inode *dp;	/* inode for the directory being modified */
8883	struct inode *ip;	/* inode for directory entry being removed */
8884	int isrmdir;		/* indicates if doing RMDIR */
8885{
8886	struct dirrem *dirrem, *prevdirrem;
8887	struct inodedep *inodedep;
8888	int direct;
8889
8890	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
8891	    ("softdep_setup_remove called on non-softdep filesystem"));
8892	/*
8893	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
8894	 * newdirrem() to set up the full directory remove, which requires
8895	 * isrmdir > 1.
8896	 */
8897	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
8898	/*
8899	 * Add the dirrem to the inodedep's pending remove list for quick
8900	 * discovery later.
8901	 */
8902	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8903	    &inodedep) == 0)
8904		panic("softdep_setup_remove: Lost inodedep.");
8905	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
8906	dirrem->dm_state |= ONDEPLIST;
8907	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
8908
8909	/*
8910	 * If the COMPLETE flag is clear, then there were no active
8911	 * entries and we want to roll back to a zeroed entry until
8912	 * the new inode is committed to disk. If the COMPLETE flag is
8913	 * set then we have deleted an entry that never made it to
8914	 * disk. If the entry we deleted resulted from a name change,
8915	 * then the old name still resides on disk. We cannot delete
8916	 * its inode (returned to us in prevdirrem) until the zeroed
8917	 * directory entry gets to disk. The new inode has never been
8918	 * referenced on the disk, so can be deleted immediately.
8919	 */
8920	if ((dirrem->dm_state & COMPLETE) == 0) {
8921		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
8922		    dm_next);
8923		FREE_LOCK(ip->i_ump);
8924	} else {
8925		if (prevdirrem != NULL)
8926			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
8927			    prevdirrem, dm_next);
8928		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
8929		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
8930		FREE_LOCK(ip->i_ump);
8931		if (direct)
8932			handle_workitem_remove(dirrem, 0);
8933	}
8934}
8935
8936/*
8937 * Check for an entry matching 'offset' on both the pd_diraddhd list and the
8938 * pd_pendinghd list of a pagedep.
8939 */
8940static struct diradd *
8941diradd_lookup(pagedep, offset)
8942	struct pagedep *pagedep;
8943	int offset;
8944{
8945	struct diradd *dap;
8946
8947	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
8948		if (dap->da_offset == offset)
8949			return (dap);
8950	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
8951		if (dap->da_offset == offset)
8952			return (dap);
8953	return (NULL);
8954}
8955
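/*
 * Standalone model (excluded from the build) of the two-phase lookup
 * above: probe the offset-hashed bucket first, then the pending list.
 * The bucket count and the shift in the hash mirror the DAHASHSZ and
 * DIRADDHASH definitions in ufs/ffs/softdep.h at the time of writing;
 * treat them as assumptions.
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

#define	MODEL_DAHASHSZ		5
#define	MODEL_DIRADDHASH(offset) (((offset) >> 2) % MODEL_DAHASHSZ)

struct model_diradd {
	LIST_ENTRY(model_diradd) da_pdlist;
	int da_offset;
};
LIST_HEAD(model_dahd, model_diradd);

static struct model_diradd *
model_diradd_lookup(struct model_dahd *diraddhd, struct model_dahd *pendinghd,
    int offset)
{
	struct model_diradd *dap;

	/* Incomplete adds live in a small hash keyed on entry offset. */
	LIST_FOREACH(dap, &diraddhd[MODEL_DIRADDHASH(offset)], da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	/* Completed adds all share one unhashed pending list. */
	LIST_FOREACH(dap, pendinghd, da_pdlist)
		if (dap->da_offset == offset)
			return (dap);
	return (NULL);
}
#endif
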
8956/*
8957 * Search for a .. diradd dependency in a directory that is being removed.
8958 * If the directory was renamed to a new parent we have a diradd rather
8959 * than a mkdir for the .. entry.  We need to cancel it now before
8960 * it is found in truncate().
8961 */
8962static struct jremref *
8963cancel_diradd_dotdot(ip, dirrem, jremref)
8964	struct inode *ip;
8965	struct dirrem *dirrem;
8966	struct jremref *jremref;
8967{
8968	struct pagedep *pagedep;
8969	struct diradd *dap;
8970	struct worklist *wk;
8971
8972	if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
8973	    &pagedep) == 0)
8974		return (jremref);
8975	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
8976	if (dap == NULL)
8977		return (jremref);
8978	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
8979	/*
8980	 * Mark any journal work as belonging to the parent so it is freed
8981	 * with the .. reference.
8982	 */
8983	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8984		wk->wk_state |= MKDIR_PARENT;
8985	return (NULL);
8986}
8987
8988/*
8989 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
8990 * replace it with a dirrem/diradd pair as a result of re-parenting a
8991 * directory.  This ensures that we don't simultaneously have a mkdir and
8992 * a diradd for the same .. entry.
8993 */
8994static struct jremref *
8995cancel_mkdir_dotdot(ip, dirrem, jremref)
8996	struct inode *ip;
8997	struct dirrem *dirrem;
8998	struct jremref *jremref;
8999{
9000	struct inodedep *inodedep;
9001	struct jaddref *jaddref;
9002	struct ufsmount *ump;
9003	struct mkdir *mkdir;
9004	struct diradd *dap;
9005
9006	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
9007	    &inodedep) == 0)
9008		return (jremref);
9009	dap = inodedep->id_mkdiradd;
9010	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
9011		return (jremref);
9012	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9013	for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9014	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
9015		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
9016			break;
9017	if (mkdir == NULL)
9018		panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
9019	if ((jaddref = mkdir->md_jaddref) != NULL) {
9020		mkdir->md_jaddref = NULL;
9021		jaddref->ja_state &= ~MKDIR_PARENT;
9022		if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
9023		    &inodedep) == 0)
9024			panic("cancel_mkdir_dotdot: Lost parent inodedep");
9025		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
9026			journal_jremref(dirrem, jremref, inodedep);
9027			jremref = NULL;
9028		}
9029	}
9030	if (mkdir->md_state & ONWORKLIST)
9031		WORKLIST_REMOVE(&mkdir->md_list);
9032	mkdir->md_state |= ALLCOMPLETE;
9033	complete_mkdir(mkdir);
9034	return (jremref);
9035}
9036
9037static void
9038journal_jremref(dirrem, jremref, inodedep)
9039	struct dirrem *dirrem;
9040	struct jremref *jremref;
9041	struct inodedep *inodedep;
9042{
9043
9044	if (inodedep == NULL)
9045		if (inodedep_lookup(jremref->jr_list.wk_mp,
9046		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
9047			panic("journal_jremref: Lost inodedep");
9048	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
9049	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
9050	add_to_journal(&jremref->jr_list);
9051}
9052
9053static void
9054dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
9055	struct dirrem *dirrem;
9056	struct jremref *jremref;
9057	struct jremref *dotremref;
9058	struct jremref *dotdotremref;
9059{
9060	struct inodedep *inodedep;
9061
9062
9063	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
9064	    &inodedep) == 0)
9065		panic("dirrem_journal: Lost inodedep");
9066	journal_jremref(dirrem, jremref, inodedep);
9067	if (dotremref)
9068		journal_jremref(dirrem, dotremref, inodedep);
9069	if (dotdotremref)
9070		journal_jremref(dirrem, dotdotremref, NULL);
9071}
9072
9073/*
9074 * Allocate a new dirrem if appropriate and return it along with
9075 * its associated pagedep. Called without a lock, returns with lock.
9076 */
9077static struct dirrem *
9078newdirrem(bp, dp, ip, isrmdir, prevdirremp)
9079	struct buf *bp;		/* buffer containing directory block */
9080	struct inode *dp;	/* inode for the directory being modified */
9081	struct inode *ip;	/* inode for directory entry being removed */
9082	int isrmdir;		/* indicates if doing RMDIR */
9083	struct dirrem **prevdirremp; /* previously referenced inode, if any */
9084{
9085	int offset;
9086	ufs_lbn_t lbn;
9087	struct diradd *dap;
9088	struct dirrem *dirrem;
9089	struct pagedep *pagedep;
9090	struct jremref *jremref;
9091	struct jremref *dotremref;
9092	struct jremref *dotdotremref;
9093	struct vnode *dvp;
9094
9095	/*
9096	 * Whiteouts have no deletion dependencies.
9097	 */
9098	if (ip == NULL)
9099		panic("newdirrem: whiteout");
9100	dvp = ITOV(dp);
9101	/*
9102	 * If the system is over its limit and our filesystem is
9103	 * responsible for more than our share of that usage and
9104	 * we are not a snapshot, request some inodedep cleanup.
9105	 * Limiting the number of dirrem structures will also limit
9106	 * the number of freefile and freeblks structures.
9107	 */
9108	ACQUIRE_LOCK(ip->i_ump);
9109	if (!IS_SNAPSHOT(ip) && softdep_excess_dirrem(ip->i_ump))
9110		schedule_cleanup(ITOV(dp)->v_mount);
9111	else
9112		FREE_LOCK(ip->i_ump);
9113	dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS |
9114	    M_ZERO);
9115	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
9116	LIST_INIT(&dirrem->dm_jremrefhd);
9117	LIST_INIT(&dirrem->dm_jwork);
9118	dirrem->dm_state = isrmdir ? RMDIR : 0;
9119	dirrem->dm_oldinum = ip->i_number;
9120	*prevdirremp = NULL;
9121	/*
9122	 * Allocate remove reference structures to track journal write
9123	 * dependencies.  We will always have one for the link and
9124	 * when doing directories we will always have one more for dot.
9125	 * When renaming a directory we skip the dotdot link change so
9126	 * this is not needed.
9127	 */
9128	jremref = dotremref = dotdotremref = NULL;
9129	if (DOINGSUJ(dvp)) {
9130		if (isrmdir) {
9131			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
9132			    ip->i_effnlink + 2);
9133			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
9134			    ip->i_effnlink + 1);
9135			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
9136			    dp->i_effnlink + 1);
9137			dotdotremref->jr_state |= MKDIR_PARENT;
9138		} else
9139			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
9140			    ip->i_effnlink + 1);
9141	}
9142	ACQUIRE_LOCK(ip->i_ump);
9143	lbn = lblkno(dp->i_fs, dp->i_offset);
9144	offset = blkoff(dp->i_fs, dp->i_offset);
9145	pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
9146	    &pagedep);
9147	dirrem->dm_pagedep = pagedep;
9148	dirrem->dm_offset = offset;
9149	/*
9150	 * If we're renaming a .. link to a new directory, cancel any
9151	 * existing MKDIR_PARENT mkdir.  If it has already been canceled,
9152	 * the jremref is preserved for any potential diradd in this
9153	 * location.  This cannot coincide with an rmdir.
9154	 */
9155	if (dp->i_offset == DOTDOT_OFFSET) {
9156		if (isrmdir)
9157			panic("newdirrem: .. directory change during remove?");
9158		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
9159	}
9160	/*
9161	 * If we're removing a directory search for the .. dependency now and
9162	 * cancel it.  Any pending journal work will be added to the dirrem
9163	 * to be completed when the workitem remove completes.
9164	 */
9165	if (isrmdir)
9166		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
9167	/*
9168	 * Check for a diradd dependency for the same directory entry.
9169	 * If present, then both dependencies become obsolete and can
9170	 * be de-allocated.
9171	 */
9172	dap = diradd_lookup(pagedep, offset);
9173	if (dap == NULL) {
9174		/*
9175		 * Link the jremref structures into the dirrem so they are
9176		 * written prior to the pagedep.
9177		 */
9178		if (jremref)
9179			dirrem_journal(dirrem, jremref, dotremref,
9180			    dotdotremref);
9181		return (dirrem);
9182	}
9183	/*
9184	 * Must be ATTACHED at this point.
9185	 */
9186	if ((dap->da_state & ATTACHED) == 0)
9187		panic("newdirrem: not ATTACHED");
9188	if (dap->da_newinum != ip->i_number)
9189		panic("newdirrem: inum %ju should be %ju",
9190		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
9191	/*
9192	 * If we are deleting a changed name that never made it to disk,
9193	 * then return the dirrem describing the previous inode (which
9194	 * represents the inode currently referenced from this entry on disk).
9195	 */
9196	if ((dap->da_state & DIRCHG) != 0) {
9197		*prevdirremp = dap->da_previous;
9198		dap->da_state &= ~DIRCHG;
9199		dap->da_pagedep = pagedep;
9200	}
9201	/*
9202	 * We are deleting an entry that never made it to disk.
9203	 * Mark it COMPLETE so we can delete its inode immediately.
9204	 */
9205	dirrem->dm_state |= COMPLETE;
9206	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
9207#ifdef SUJ_DEBUG
9208	if (isrmdir == 0) {
9209		struct worklist *wk;
9210
9211		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9212			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9213				panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
9214	}
9215#endif
9216
9217	return (dirrem);
9218}
9219
9220/*
9221 * Directory entry change dependencies.
9222 *
9223 * Changing an existing directory entry requires that an add operation
9224 * be completed first followed by a deletion. The semantics for the addition
9225 * are identical to the description of adding a new entry above except
9226 * that the rollback is to the old inode number rather than zero. Once
9227 * the addition dependency is completed, the removal is done as described
9228 * in the removal routine above.
9229 */
9230
9231/*
9232 * This routine should be called immediately after changing
9233 * a directory entry.  The inode's link count should not be
9234 * decremented by the calling procedure -- the soft updates
9235 * code will perform this task when it is safe.
9236 */
9237void
9238softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
9239	struct buf *bp;		/* buffer containing directory block */
9240	struct inode *dp;	/* inode for the directory being modified */
9241	struct inode *ip;	/* inode for directory entry being removed */
9242	ino_t newinum;		/* new inode number for changed entry */
9243	int isrmdir;		/* indicates if doing RMDIR */
9244{
9245	int offset;
9246	struct diradd *dap = NULL;
9247	struct dirrem *dirrem, *prevdirrem;
9248	struct pagedep *pagedep;
9249	struct inodedep *inodedep;
9250	struct jaddref *jaddref;
9251	struct mount *mp;
9252
9253	offset = blkoff(dp->i_fs, dp->i_offset);
9254	mp = UFSTOVFS(dp->i_ump);
9255	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
9256	   ("softdep_setup_directory_change called on non-softdep filesystem"));
9257
9258	/*
9259	 * Whiteouts do not need diradd dependencies.
9260	 */
9261	if (newinum != WINO) {
9262		dap = malloc(sizeof(struct diradd),
9263		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9264		workitem_alloc(&dap->da_list, D_DIRADD, mp);
9265		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9266		dap->da_offset = offset;
9267		dap->da_newinum = newinum;
9268		LIST_INIT(&dap->da_jwork);
9269	}
9270
9271	/*
9272	 * Allocate a new dirrem and ACQUIRE_LOCK.
9273	 */
9274	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9275	pagedep = dirrem->dm_pagedep;
9276	/*
9277	 * The possible values for isrmdir:
9278	 *	0 - non-directory file rename
9279	 *	1 - directory rename within same directory
9280	 *   inum - directory rename to new directory of given inode number
9281	 * When renaming to a new directory, we are both deleting and
9282	 * creating a new directory entry, so the link count on the new
9283	 * directory should not change. Thus we do not need the followup
9284	 * dirrem which is usually done in handle_workitem_remove. We set
9285	 * the DIRCHG flag to tell handle_workitem_remove to skip the
9286	 * followup dirrem.
9287	 */
9288	if (isrmdir > 1)
9289		dirrem->dm_state |= DIRCHG;
9290
9291	/*
9292	 * Whiteouts have no additional dependencies,
9293	 * so just put the dirrem on the correct list.
9294	 */
9295	if (newinum == WINO) {
9296		if ((dirrem->dm_state & COMPLETE) == 0) {
9297			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9298			    dm_next);
9299		} else {
9300			dirrem->dm_dirinum = pagedep->pd_ino;
9301			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9302				add_to_worklist(&dirrem->dm_list, 0);
9303		}
9304		FREE_LOCK(dp->i_ump);
9305		return;
9306	}
9307	/*
9308	 * Add the dirrem to the inodedep's pending remove list for quick
9309	 * discovery later.  A valid nlinkdelta ensures that this lookup
9310	 * will not fail.
9311	 */
9312	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9313		panic("softdep_setup_directory_change: Lost inodedep.");
9314	dirrem->dm_state |= ONDEPLIST;
9315	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9316
9317	/*
9318	 * If the COMPLETE flag is clear, then there were no active
9319	 * entries and we want to roll back to the previous inode until
9320	 * the new inode is committed to disk. If the COMPLETE flag is
9321	 * set, then we have deleted an entry that never made it to disk.
9322	 * If the entry we deleted resulted from a name change, then the old
9323	 * inode reference still resides on disk. Any rollback that we do
9324	 * needs to be to that old inode (returned to us in prevdirrem). If
9325	 * the entry we deleted resulted from a create, then there is
9326	 * no entry on the disk, so we want to roll back to zero rather
9327	 * than the uncommitted inode. In either of the COMPLETE cases we
9328	 * want to immediately free the unwritten and unreferenced inode.
9329	 */
9330	if ((dirrem->dm_state & COMPLETE) == 0) {
9331		dap->da_previous = dirrem;
9332	} else {
9333		if (prevdirrem != NULL) {
9334			dap->da_previous = prevdirrem;
9335		} else {
9336			dap->da_state &= ~DIRCHG;
9337			dap->da_pagedep = pagedep;
9338		}
9339		dirrem->dm_dirinum = pagedep->pd_ino;
9340		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9341			add_to_worklist(&dirrem->dm_list, 0);
9342	}
9343	/*
9344	 * Lookup the jaddref for this journal entry.  We must finish
9345	 * initializing it and make the diradd write dependent on it.
9346	 * If we're not journaling, put it on the id_bufwait list if the
9347	 * inode is not yet written. If it is written, do the post-inode
9348	 * write processing to put it on the id_pendinghd list.
9349	 */
9350	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
9351	if (MOUNTEDSUJ(mp)) {
9352		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9353		    inoreflst);
9354		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9355		    ("softdep_setup_directory_change: bad jaddref %p",
9356		    jaddref));
9357		jaddref->ja_diroff = dp->i_offset;
9358		jaddref->ja_diradd = dap;
9359		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9360		    dap, da_pdlist);
9361		add_to_journal(&jaddref->ja_list);
9362	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9363		dap->da_state |= COMPLETE;
9364		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9365		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9366	} else {
9367		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9368		    dap, da_pdlist);
9369		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9370	}
9371	/*
9372	 * If we're making a new name for a directory that has not been
9373	 * committed, we need to move the dot and dotdot references to
9374	 * this new name.
9375	 */
9376	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
9377		merge_diradd(inodedep, dap);
9378	FREE_LOCK(dp->i_ump);
9379}
9380
9381/*
9382 * Called whenever the link count on an inode is changed.
9383 * It creates an inode dependency so that the new reference(s)
9384 * to the inode cannot be committed to disk until the updated
9385 * inode has been written.
9386 */
9387void
9388softdep_change_linkcnt(ip)
9389	struct inode *ip;	/* the inode with the increased link count */
9390{
9391	struct inodedep *inodedep;
9392
9393	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
9394	    ("softdep_change_linkcnt called on non-softdep filesystem"));
9395	ACQUIRE_LOCK(ip->i_ump);
9396	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
9397	    &inodedep);
9398	if (ip->i_nlink < ip->i_effnlink)
9399		panic("softdep_change_linkcnt: bad delta");
9400	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9401	FREE_LOCK(ip->i_ump);
9402}
9403
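/*
 * Worked example (excluded from the build) of the bookkeeping above:
 * i_effnlink reflects all links including uncommitted operations while
 * i_nlink is the value that may be written to disk, so id_nlinkdelta
 * records how far the on-disk count is allowed to lag.  The struct is
 * a simplified stand-in for the real inode.
 */
#if 0
#include <assert.h>

struct model_inode { int i_nlink, i_effnlink; };

static int
model_linkcnt_delta(const struct model_inode *ip)
{

	/*
	 * After unlink(2) of a file's last name, i_effnlink is already
	 * 0 while i_nlink stays 1 until the zeroed directory entry is
	 * on disk, giving a delta of 1.
	 */
	assert(ip->i_nlink >= ip->i_effnlink);	/* mirrors the panic */
	return (ip->i_nlink - ip->i_effnlink);
}
#endif
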
9404/*
9405 * Attach a sbdep dependency to the superblock buf so that we can keep
9406 * track of the head of the linked list of referenced but unlinked inodes.
9407 */
9408void
9409softdep_setup_sbupdate(ump, fs, bp)
9410	struct ufsmount *ump;
9411	struct fs *fs;
9412	struct buf *bp;
9413{
9414	struct sbdep *sbdep;
9415	struct worklist *wk;
9416
9417	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9418	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
9419	LIST_FOREACH(wk, &bp->b_dep, wk_list)
9420		if (wk->wk_type == D_SBDEP)
9421			break;
9422	if (wk != NULL)
9423		return;
9424	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9425	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9426	sbdep->sb_fs = fs;
9427	sbdep->sb_ump = ump;
9428	ACQUIRE_LOCK(ump);
9429	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9430	FREE_LOCK(ump);
9431}
9432
9433/*
9434 * Return the first unlinked inodedep which is ready to be the head of the
9435 * list.  The inodedep and all those after it must have valid next pointers.
9436 */
9437static struct inodedep *
9438first_unlinked_inodedep(ump)
9439	struct ufsmount *ump;
9440{
9441	struct inodedep *inodedep;
9442	struct inodedep *idp;
9443
9444	LOCK_OWNED(ump);
9445	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9446	    inodedep; inodedep = idp) {
9447		if ((inodedep->id_state & UNLINKNEXT) == 0)
9448			return (NULL);
9449		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9450		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9451			break;
9452		if ((inodedep->id_state & UNLINKPREV) == 0)
9453			break;
9454	}
9455	return (inodedep);
9456}
9457
9458/*
9459 * Set the sujfree unlinked head pointer prior to writing a superblock.
9460 */
9461static void
9462initiate_write_sbdep(sbdep)
9463	struct sbdep *sbdep;
9464{
9465	struct inodedep *inodedep;
9466	struct fs *bpfs;
9467	struct fs *fs;
9468
9469	bpfs = sbdep->sb_fs;
9470	fs = sbdep->sb_ump->um_fs;
9471	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9472	if (inodedep) {
9473		fs->fs_sujfree = inodedep->id_ino;
9474		inodedep->id_state |= UNLINKPREV;
9475	} else
9476		fs->fs_sujfree = 0;
9477	bpfs->fs_sujfree = fs->fs_sujfree;
9478}
9479
9480/*
9481 * After a superblock is written determine whether it must be written again
9482 * due to a changing unlinked list head.
9483 */
9484static int
9485handle_written_sbdep(sbdep, bp)
9486	struct sbdep *sbdep;
9487	struct buf *bp;
9488{
9489	struct inodedep *inodedep;
9490	struct fs *fs;
9491
9492	LOCK_OWNED(sbdep->sb_ump);
9493	fs = sbdep->sb_fs;
9494	/*
9495	 * If the superblock doesn't match the in-memory list, start over.
9496	 */
9497	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9498	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9499	    (inodedep == NULL && fs->fs_sujfree != 0)) {
9500		bdirty(bp);
9501		return (1);
9502	}
9503	WORKITEM_FREE(sbdep, D_SBDEP);
9504	if (fs->fs_sujfree == 0)
9505		return (0);
9506	/*
9507	 * Now that we have a record of this inode in stable store, allow it
9508	 * to be written to free up pending work.  Inodes may see a lot of
9509	 * write activity after they are unlinked which we must not hold up.
9510	 */
9511	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9512		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9513			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9514			    inodedep, inodedep->id_state);
9515		if (inodedep->id_state & UNLINKONLIST)
9516			break;
9517		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9518	}
9519
9520	return (0);
9521}
9522
9523/*
9524 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9525 */
9526static void
9527unlinked_inodedep(mp, inodedep)
9528	struct mount *mp;
9529	struct inodedep *inodedep;
9530{
9531	struct ufsmount *ump;
9532
9533	ump = VFSTOUFS(mp);
9534	LOCK_OWNED(ump);
9535	if (MOUNTEDSUJ(mp) == 0)
9536		return;
9537	ump->um_fs->fs_fmod = 1;
9538	if (inodedep->id_state & UNLINKED)
9539		panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9540	inodedep->id_state |= UNLINKED;
9541	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9542}
9543
9544/*
9545 * Remove an inodedep from the unlinked inodedep list.  This may require
9546 * disk writes if the inode has made it that far.
9547 */
9548static void
9549clear_unlinked_inodedep(inodedep)
9550	struct inodedep *inodedep;
9551{
9552	struct ufsmount *ump;
9553	struct inodedep *idp;
9554	struct inodedep *idn;
9555	struct fs *fs;
9556	struct buf *bp;
9557	ino_t ino;
9558	ino_t nino;
9559	ino_t pino;
9560	int error;
9561
9562	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9563	fs = ump->um_fs;
9564	ino = inodedep->id_ino;
9565	error = 0;
9566	for (;;) {
9567		LOCK_OWNED(ump);
9568		KASSERT((inodedep->id_state & UNLINKED) != 0,
9569		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9570		    inodedep));
9571		/*
9572		 * If nothing has yet been written, simply remove us from
9573		 * the in-memory list and return.  This is the most common
9574		 * case where handle_workitem_remove() loses the final
9575		 * reference.
9576		 */
9577		if ((inodedep->id_state & UNLINKLINKS) == 0)
9578			break;
9579		/*
9580		 * If we have a NEXT pointer and no PREV pointer we can simply
9581		 * clear NEXT's PREV and remove ourselves from the list.  Be
9582		 * careful not to clear PREV if the superblock points at
9583		 * next as well.
9584		 */
9585		idn = TAILQ_NEXT(inodedep, id_unlinked);
9586		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9587			if (idn && fs->fs_sujfree != idn->id_ino)
9588				idn->id_state &= ~UNLINKPREV;
9589			break;
9590		}
9591		/*
9592		 * Here we have an inodedep which is actually linked into
9593		 * the list.  We must remove it by forcing a write to the
9594		 * link before us, whether it be the superblock or an inode.
9595		 * Unfortunately the list may change while we're waiting
9596		 * on the buf lock for either resource so we must loop until
9597		 * we lock the right one.  If both the superblock and an
9598		 * inode point to this inode we must clear the inode first
9599		 * followed by the superblock.
9600		 */
9601		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9602		pino = 0;
9603		if (idp && (idp->id_state & UNLINKNEXT))
9604			pino = idp->id_ino;
9605		FREE_LOCK(ump);
9606		if (pino == 0) {
9607			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9608			    (int)fs->fs_sbsize, 0, 0, 0);
9609		} else {
9610			error = bread(ump->um_devvp,
9611			    fsbtodb(fs, ino_to_fsba(fs, pino)),
9612			    (int)fs->fs_bsize, NOCRED, &bp);
9613			if (error)
9614				brelse(bp);
9615		}
9616		ACQUIRE_LOCK(ump);
9617		if (error)
9618			break;
9619		/* If the list has changed restart the loop. */
9620		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9621		nino = 0;
9622		if (idp && (idp->id_state & UNLINKNEXT))
9623			nino = idp->id_ino;
9624		if (nino != pino ||
9625		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9626			FREE_LOCK(ump);
9627			brelse(bp);
9628			ACQUIRE_LOCK(ump);
9629			continue;
9630		}
9631		nino = 0;
9632		idn = TAILQ_NEXT(inodedep, id_unlinked);
9633		if (idn)
9634			nino = idn->id_ino;
9635		/*
9636		 * Remove us from the in-memory list.  After this we cannot
9637		 * access the inodedep.
9638		 */
9639		KASSERT((inodedep->id_state & UNLINKED) != 0,
9640		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9641		    inodedep));
9642		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9643		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9644		FREE_LOCK(ump);
9645		/*
9646		 * The predecessor's next pointer is manually updated here
9647		 * so that the NEXT flag is never cleared for an element
9648		 * that is in the list.
9649		 */
9650		if (pino == 0) {
9651			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9652			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9653			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9654			    bp);
9655		} else if (fs->fs_magic == FS_UFS1_MAGIC)
9656			((struct ufs1_dinode *)bp->b_data +
9657			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9658		else
9659			((struct ufs2_dinode *)bp->b_data +
9660			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9661		/*
9662		 * If the bwrite fails we have no recourse to recover.  The
9663		 * filesystem is corrupted already.
9664		 */
9665		bwrite(bp);
9666		ACQUIRE_LOCK(ump);
9667		/*
9668		 * If the superblock pointer still needs to be cleared force
9669		 * a write here.
9670		 */
9671		if (fs->fs_sujfree == ino) {
9672			FREE_LOCK(ump);
9673			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9674			    (int)fs->fs_sbsize, 0, 0, 0);
9675			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9676			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9677			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9678			    bp);
9679			bwrite(bp);
9680			ACQUIRE_LOCK(ump);
9681		}
9682
9683		if (fs->fs_sujfree != ino)
9684			return;
9685		panic("clear_unlinked_inodedep: Failed to clear free head");
9686	}
9687	if (inodedep->id_ino == fs->fs_sujfree)
9688		panic("clear_unlinked_inodedep: Freeing head of free list");
9689	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9690	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9691	return;
9692}
9693
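/*
 * Standalone model (excluded from the build) of the on-disk structure
 * maintained above: the superblock's fs_sujfree names the head of a
 * singly linked list of unlinked-but-referenced inodes chained through
 * di_freelink, with 0 (never a valid inode) terminating the list.
 * Deleting a middle element means rewriting whichever object points at
 * us -- the predecessor inode or the superblock -- just as the buffer
 * dance in clear_unlinked_inodedep() does.  Names are stand-ins.
 */
#if 0
#define	MODEL_NINO	8

static unsigned model_sujfree;			/* superblock head	*/
static unsigned model_freelink[MODEL_NINO];	/* per-inode di_freelink */

static void
model_clear_unlinked(unsigned ino)
{
	unsigned *linkp = &model_sujfree;

	while (*linkp != ino)			/* find who points at us */
		linkp = &model_freelink[*linkp];
	*linkp = model_freelink[ino];		/* "rewrite" that object */
	model_freelink[ino] = 0;
}
#endif
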
9694/*
9695 * This workitem decrements the inode's link count.
9696 * If the link count reaches zero, the file is removed.
9697 */
9698static int
9699handle_workitem_remove(dirrem, flags)
9700	struct dirrem *dirrem;
9701	int flags;
9702{
9703	struct inodedep *inodedep;
9704	struct workhead dotdotwk;
9705	struct worklist *wk;
9706	struct ufsmount *ump;
9707	struct mount *mp;
9708	struct vnode *vp;
9709	struct inode *ip;
9710	ino_t oldinum;
9711
9712	if (dirrem->dm_state & ONWORKLIST)
9713		panic("handle_workitem_remove: dirrem %p still on worklist",
9714		    dirrem);
9715	oldinum = dirrem->dm_oldinum;
9716	mp = dirrem->dm_list.wk_mp;
9717	ump = VFSTOUFS(mp);
9718	flags |= LK_EXCLUSIVE;
9719	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
9720		return (EBUSY);
9721	ip = VTOI(vp);
9722	ACQUIRE_LOCK(ump);
9723	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
9724		panic("handle_workitem_remove: lost inodedep");
9725	if (dirrem->dm_state & ONDEPLIST)
9726		LIST_REMOVE(dirrem, dm_inonext);
9727	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
9728	    ("handle_workitem_remove:  Journal entries not written."));
9729
9730	/*
9731	 * Move all dependencies waiting on the remove to complete
9732	 * from the dirrem to the inode inowait list to be completed
9733	 * after the inode has been updated and written to disk.  Any
9734	 * marked MKDIR_PARENT are saved to be completed when the .. ref
9735	 * is removed.
9736	 */
9737	LIST_INIT(&dotdotwk);
9738	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
9739		WORKLIST_REMOVE(wk);
9740		if (wk->wk_state & MKDIR_PARENT) {
9741			wk->wk_state &= ~MKDIR_PARENT;
9742			WORKLIST_INSERT(&dotdotwk, wk);
9743			continue;
9744		}
9745		WORKLIST_INSERT(&inodedep->id_inowait, wk);
9746	}
9747	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
9748	/*
9749	 * Normal file deletion.
9750	 */
9751	if ((dirrem->dm_state & RMDIR) == 0) {
9752		ip->i_nlink--;
9753		DIP_SET(ip, i_nlink, ip->i_nlink);
9754		ip->i_flag |= IN_CHANGE;
9755		if (ip->i_nlink < ip->i_effnlink)
9756			panic("handle_workitem_remove: bad file delta");
9757		if (ip->i_nlink == 0)
9758			unlinked_inodedep(mp, inodedep);
9759		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9760		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9761		    ("handle_workitem_remove: worklist not empty. %s",
9762		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
9763		WORKITEM_FREE(dirrem, D_DIRREM);
9764		FREE_LOCK(ump);
9765		goto out;
9766	}
9767	/*
9768	 * Directory deletion. Decrement reference count for both the
9769	 * just deleted parent directory entry and the reference for ".".
9770	 * Arrange to have the reference count on the parent decremented
9771	 * to account for the loss of "..".
9772	 */
9773	ip->i_nlink -= 2;
9774	DIP_SET(ip, i_nlink, ip->i_nlink);
9775	ip->i_flag |= IN_CHANGE;
9776	if (ip->i_nlink < ip->i_effnlink)
9777		panic("handle_workitem_remove: bad dir delta");
9778	if (ip->i_nlink == 0)
9779		unlinked_inodedep(mp, inodedep);
9780	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9781	/*
9782	 * Rename a directory to a new parent. Since we are both deleting
9783	 * and creating a new directory entry, the link count on the new
9784	 * directory should not change. Thus we skip the followup dirrem.
9785	 */
9786	if (dirrem->dm_state & DIRCHG) {
9787		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9788		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
9789		WORKITEM_FREE(dirrem, D_DIRREM);
9790		FREE_LOCK(ump);
9791		goto out;
9792	}
9793	dirrem->dm_state = ONDEPLIST;
9794	dirrem->dm_oldinum = dirrem->dm_dirinum;
9795	/*
9796	 * Place the dirrem on the parent's diremhd list.
9797	 */
9798	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
9799		panic("handle_workitem_remove: lost dir inodedep");
9800	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9801	/*
9802	 * If the allocated inode has never been written to disk, then
9803	 * the on-disk inode is zero'ed and we can remove the file
9804	 * immediately.  When journaling, if the inode has been marked
9805	 * unlinked and not DEPCOMPLETE, we know it can never be written.
9806	 */
9807	inodedep_lookup(mp, oldinum, 0, &inodedep);
9808	if (inodedep == NULL ||
9809	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
9810	    check_inode_unwritten(inodedep)) {
9811		FREE_LOCK(ump);
9812		vput(vp);
9813		return handle_workitem_remove(dirrem, flags);
9814	}
9815	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
9816	FREE_LOCK(ump);
9817	ip->i_flag |= IN_CHANGE;
9818out:
9819	ffs_update(vp, 0);
9820	vput(vp);
9821	return (0);
9822}
9823
9824/*
9825 * Inode de-allocation dependencies.
9826 *
9827 * When an inode's link count is reduced to zero, it can be de-allocated. We
9828 * found it convenient to postpone de-allocation until after the inode is
9829 * written to disk with its new link count (zero).  At this point, all of the
9830 * on-disk inode's block pointers are nullified and, with careful dependency
9831 * list ordering, all dependencies related to the inode will be satisfied and
9832 * the corresponding dependency structures de-allocated.  So, if/when the
9833 * inode is reused, there will be no mixing of old dependencies with new
9834 * ones.  This artificial dependency is set up by the block de-allocation
9835 * procedure above (softdep_setup_freeblocks) and completed by the
9836 * following procedure.
9837 */
9838static void
9839handle_workitem_freefile(freefile)
9840	struct freefile *freefile;
9841{
9842	struct workhead wkhd;
9843	struct fs *fs;
9844	struct inodedep *idp;
9845	struct ufsmount *ump;
9846	int error;
9847
9848	ump = VFSTOUFS(freefile->fx_list.wk_mp);
9849	fs = ump->um_fs;
9850#ifdef DEBUG
9851	ACQUIRE_LOCK(ump);
9852	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
9853	FREE_LOCK(ump);
9854	if (error)
9855		panic("handle_workitem_freefile: inodedep %p survived", idp);
9856#endif
9857	UFS_LOCK(ump);
9858	fs->fs_pendinginodes -= 1;
9859	UFS_UNLOCK(ump);
9860	LIST_INIT(&wkhd);
9861	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
9862	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
9863	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
9864		softdep_error("handle_workitem_freefile", error);
9865	ACQUIRE_LOCK(ump);
9866	WORKITEM_FREE(freefile, D_FREEFILE);
9867	FREE_LOCK(ump);
9868}
9869
9870
9871/*
9872 * Helper function which unlinks marker element from work list and returns
9873 * the next element on the list.
9874 */
9875static __inline struct worklist *
9876markernext(struct worklist *marker)
9877{
9878	struct worklist *next;
9879
9880	next = LIST_NEXT(marker, wk_list);
9881	LIST_REMOVE(marker, wk_list);
9882	return next;
9883}
9884
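/*
 * Standalone model (excluded from the build) of the marker convention
 * used with markernext() above and in softdep_disk_io_initiation()
 * below: a marker element is linked after the current item before any
 * step that may drop locks, so the scan can resume from the marker even
 * if neighbouring items were freed in the meantime.
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

struct model_wk {
	LIST_ENTRY(model_wk) wk_list;
};
LIST_HEAD(model_wkhd, model_wk);

static void
model_scan(struct model_wkhd *head, void (*process)(struct model_wk *))
{
	struct model_wk marker, *wk;

	for (wk = LIST_FIRST(head); wk != NULL; ) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		process(wk);		/* may sleep, drop locks, free wk */
		wk = LIST_NEXT(&marker, wk_list);
		LIST_REMOVE(&marker, wk_list);
	}
}
#endif
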
9885/*
9886 * Disk writes.
9887 *
9888 * The dependency structures constructed above are most actively used when file
9889 * system blocks are written to disk.  No constraints are placed on when a
9890 * block can be written, but unsatisfied update dependencies are made safe by
9891 * modifying (or replacing) the source memory for the duration of the disk
9892 * write.  When the disk write completes, the memory block is again brought
9893 * up-to-date.
9894 *
9895 * In-core inode structure reclamation.
9896 *
9897 * Because there are a finite number of "in-core" inode structures, they are
9898 * reused regularly.  By transferring all inode-related dependencies to the
9899 * in-memory inode block and indexing them separately (via "inodedep"s), we
9900 * can allow "in-core" inode structures to be reused at any time and avoid
9901 * any increase in contention.
9902 *
9903 * Called just before entering the device driver to initiate a new disk I/O.
9904 * The buffer must be locked, thus, no I/O completion operations can occur
9905 * while we are manipulating its associated dependencies.
9906 */
9907static void
9908softdep_disk_io_initiation(bp)
9909	struct buf *bp;		/* structure describing disk write to occur */
9910{
9911	struct worklist *wk;
9912	struct worklist marker;
9913	struct inodedep *inodedep;
9914	struct freeblks *freeblks;
9915	struct jblkdep *jblkdep;
9916	struct newblk *newblk;
9917	struct ufsmount *ump;
9918
9919	/*
9920	 * We only care about write operations. There should never
9921	 * be dependencies for reads.
9922	 */
9923	if (bp->b_iocmd != BIO_WRITE)
9924		panic("softdep_disk_io_initiation: not write");
9925
9926	if (bp->b_vflags & BV_BKGRDINPROG)
9927		panic("softdep_disk_io_initiation: Writing buffer with "
9928		    "background write in progress: %p", bp);
9929
9930	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
9931		return;
9932	ump = VFSTOUFS(wk->wk_mp);
9933
9934	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
9935	PHOLD(curproc);			/* Don't swap out kernel stack */
9936	ACQUIRE_LOCK(ump);
9937	/*
9938	 * Do any necessary pre-I/O processing.
9939	 */
9940	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
9941	     wk = markernext(&marker)) {
9942		LIST_INSERT_AFTER(wk, &marker, wk_list);
9943		switch (wk->wk_type) {
9944
9945		case D_PAGEDEP:
9946			initiate_write_filepage(WK_PAGEDEP(wk), bp);
9947			continue;
9948
9949		case D_INODEDEP:
9950			inodedep = WK_INODEDEP(wk);
9951			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
9952				initiate_write_inodeblock_ufs1(inodedep, bp);
9953			else
9954				initiate_write_inodeblock_ufs2(inodedep, bp);
9955			continue;
9956
9957		case D_INDIRDEP:
9958			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
9959			continue;
9960
9961		case D_BMSAFEMAP:
9962			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
9963			continue;
9964
9965		case D_JSEG:
9966			WK_JSEG(wk)->js_buf = NULL;
9967			continue;
9968
9969		case D_FREEBLKS:
9970			freeblks = WK_FREEBLKS(wk);
9971			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
9972			/*
9973			 * We have to wait for the freeblks to be journaled
9974			 * before we can write an inodeblock with updated
9975			 * pointers.  Be careful to arrange the marker so
9976			 * we revisit the freeblks if it's not removed by
9977			 * the first jwait().
9978			 */
9979			if (jblkdep != NULL) {
9980				LIST_REMOVE(&marker, wk_list);
9981				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9982				jwait(&jblkdep->jb_list, MNT_WAIT);
9983			}
9984			continue;
9985		case D_ALLOCDIRECT:
9986		case D_ALLOCINDIR:
9987			/*
9988			 * We have to wait for the jnewblk to be journaled
9989			 * before we can write to a block if the contents
9990			 * may be confused with an earlier file's indirect
9991			 * at recovery time.  Handle the marker as described
9992			 * above.
9993			 */
9994			newblk = WK_NEWBLK(wk);
9995			if (newblk->nb_jnewblk != NULL &&
9996			    indirblk_lookup(newblk->nb_list.wk_mp,
9997			    newblk->nb_newblkno)) {
9998				LIST_REMOVE(&marker, wk_list);
9999				LIST_INSERT_BEFORE(wk, &marker, wk_list);
10000				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
10001			}
10002			continue;
10003
10004		case D_SBDEP:
10005			initiate_write_sbdep(WK_SBDEP(wk));
10006			continue;
10007
10008		case D_MKDIR:
10009		case D_FREEWORK:
10010		case D_FREEDEP:
10011		case D_JSEGDEP:
10012			continue;
10013
10014		default:
10015			panic("softdep_disk_io_initiation: Unexpected type %s",
10016			    TYPENAME(wk->wk_type));
10017			/* NOTREACHED */
10018		}
10019	}
10020	FREE_LOCK(ump);
10021	PRELE(curproc);			/* Allow swapout of kernel stack */
10022}
10023
10024/*
10025 * Called from within the procedure above to deal with unsatisfied
10026 * allocation dependencies in a directory. The buffer must be locked,
10027 * thus, no I/O completion operations can occur while we are
10028 * manipulating its associated dependencies.
10029 */
10030static void
10031initiate_write_filepage(pagedep, bp)
10032	struct pagedep *pagedep;
10033	struct buf *bp;
10034{
10035	struct jremref *jremref;
10036	struct jmvref *jmvref;
10037	struct dirrem *dirrem;
10038	struct diradd *dap;
10039	struct direct *ep;
10040	int i;
10041
10042	if (pagedep->pd_state & IOSTARTED) {
10043		/*
10044		 * This can only happen if there is a driver that does not
10045		 * understand chaining. Here biodone will reissue the call
10046		 * to strategy for the incomplete buffers.
10047		 */
10048		printf("initiate_write_filepage: already started\n");
10049		return;
10050	}
10051	pagedep->pd_state |= IOSTARTED;
10052	/*
10053	 * Wait for all journal remove dependencies to hit the disk.
10054	 * We cannot allow any potentially conflicting directory adds
10055	 * to be visible before removes, and rollback is too difficult.
10056	 * The per-filesystem lock may be dropped and re-acquired, however
10057	 * we hold the buf locked so the dependency can not go away.
10058	 */
10059	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
10060		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
10061			jwait(&jremref->jr_list, MNT_WAIT);
10062	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
10063		jwait(&jmvref->jm_list, MNT_WAIT);
10064	for (i = 0; i < DAHASHSZ; i++) {
10065		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
10066			ep = (struct direct *)
10067			    ((char *)bp->b_data + dap->da_offset);
10068			if (ep->d_ino != dap->da_newinum)
10069				panic("%s: dir inum %ju != new %ju",
10070				    "initiate_write_filepage",
10071				    (uintmax_t)ep->d_ino,
10072				    (uintmax_t)dap->da_newinum);
10073			if (dap->da_state & DIRCHG)
10074				ep->d_ino = dap->da_previous->dm_oldinum;
10075			else
10076				ep->d_ino = 0;
10077			dap->da_state &= ~ATTACHED;
10078			dap->da_state |= UNDONE;
10079		}
10080	}
10081}
10082
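/*
 * Simplified model (excluded from the build) of the rollback performed
 * above and undone at I/O completion: while the directory block is in
 * flight, each unsatisfied add has its d_ino reverted -- to the previous
 * on-disk inode for a DIRCHG (name change) entry, to zero otherwise --
 * and the dependency flips ATTACHED -> UNDONE.  Names are stand-ins.
 */
#if 0
struct model_dap {
	unsigned da_newinum;	/* inode the entry will point at */
	unsigned da_oldinum;	/* models da_previous->dm_oldinum */
	int	 da_dirchg;	/* models the DIRCHG state bit */
};

static unsigned
model_rollback_dino(const struct model_dap *dap)
{

	return (dap->da_dirchg ? dap->da_oldinum : 0);
}
#endif
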
10083/*
10084 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
10085 * Note that any bug fixes made to this routine must be done in the
10086 * version found below.
10087 *
10088 * Called from within the procedure above to deal with unsatisfied
10089 * allocation dependencies in an inodeblock. The buffer must be
10090 * locked, thus, no I/O completion operations can occur while we
10091 * are manipulating its associated dependencies.
10092 */
10093static void
10094initiate_write_inodeblock_ufs1(inodedep, bp)
10095	struct inodedep *inodedep;
10096	struct buf *bp;			/* The inode block */
10097{
10098	struct allocdirect *adp, *lastadp;
10099	struct ufs1_dinode *dp;
10100	struct ufs1_dinode *sip;
10101	struct inoref *inoref;
10102	struct ufsmount *ump;
10103	struct fs *fs;
10104	ufs_lbn_t i;
10105#ifdef INVARIANTS
10106	ufs_lbn_t prevlbn = 0;
10107#endif
10108	int deplist;
10109
10110	if (inodedep->id_state & IOSTARTED)
10111		panic("initiate_write_inodeblock_ufs1: already started");
10112	inodedep->id_state |= IOSTARTED;
10113	fs = inodedep->id_fs;
10114	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10115	LOCK_OWNED(ump);
10116	dp = (struct ufs1_dinode *)bp->b_data +
10117	    ino_to_fsbo(fs, inodedep->id_ino);
10118
10119	/*
10120	 * If we're on the unlinked list but have not yet written our
10121	 * next pointer, initialize it here.
10122	 */
10123	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10124		struct inodedep *inon;
10125
10126		inon = TAILQ_NEXT(inodedep, id_unlinked);
10127		dp->di_freelink = inon ? inon->id_ino : 0;
10128	}
10129	/*
10130	 * If the bitmap is not yet written, then the allocated
10131	 * inode cannot be written to disk.
10132	 */
10133	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10134		if (inodedep->id_savedino1 != NULL)
10135			panic("initiate_write_inodeblock_ufs1: I/O underway");
10136		FREE_LOCK(ump);
10137		sip = malloc(sizeof(struct ufs1_dinode),
10138		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10139		ACQUIRE_LOCK(ump);
10140		inodedep->id_savedino1 = sip;
10141		*inodedep->id_savedino1 = *dp;
10142		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
10143		dp->di_gen = inodedep->id_savedino1->di_gen;
10144		dp->di_freelink = inodedep->id_savedino1->di_freelink;
10145		return;
10146	}
10147	/*
10148	 * If no dependencies, then there is nothing to roll back.
10149	 */
10150	inodedep->id_savedsize = dp->di_size;
10151	inodedep->id_savedextsize = 0;
10152	inodedep->id_savednlink = dp->di_nlink;
10153	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10154	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10155		return;
10156	/*
10157	 * Revert the link count to that of the first unwritten journal entry.
10158	 */
10159	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10160	if (inoref)
10161		dp->di_nlink = inoref->if_nlink;
10162	/*
10163	 * Set the dependencies to busy.
10164	 */
10165	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10166	     adp = TAILQ_NEXT(adp, ad_next)) {
10167#ifdef INVARIANTS
10168		if (deplist != 0 && prevlbn >= adp->ad_offset)
10169			panic("softdep_write_inodeblock: lbn order");
10170		prevlbn = adp->ad_offset;
10171		if (adp->ad_offset < NDADDR &&
10172		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10173			panic("%s: direct pointer #%jd mismatch %d != %jd",
10174			    "softdep_write_inodeblock",
10175			    (intmax_t)adp->ad_offset,
10176			    dp->di_db[adp->ad_offset],
10177			    (intmax_t)adp->ad_newblkno);
10178		if (adp->ad_offset >= NDADDR &&
10179		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10180			panic("%s: indirect pointer #%jd mismatch %d != %jd",
10181			    "softdep_write_inodeblock",
10182			    (intmax_t)adp->ad_offset - NDADDR,
10183			    dp->di_ib[adp->ad_offset - NDADDR],
10184			    (intmax_t)adp->ad_newblkno);
10185		deplist |= 1 << adp->ad_offset;
10186		if ((adp->ad_state & ATTACHED) == 0)
10187			panic("softdep_write_inodeblock: Unknown state 0x%x",
10188			    adp->ad_state);
10189#endif /* INVARIANTS */
10190		adp->ad_state &= ~ATTACHED;
10191		adp->ad_state |= UNDONE;
10192	}
10193	/*
10194	 * The on-disk inode cannot claim to be any larger than the last
10195	 * fragment that has been written. Otherwise, the on-disk inode
10196	 * might have fragments that were not the last block in the file,
10197	 * which would corrupt the filesystem.
10198	 */
10199	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10200	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10201		if (adp->ad_offset >= NDADDR)
10202			break;
10203		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10204		/* keep going until hitting a rollback to a frag */
10205		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10206			continue;
10207		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10208		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10209#ifdef INVARIANTS
10210			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10211				panic("softdep_write_inodeblock: lost dep1");
10212#endif /* INVARIANTS */
10213			dp->di_db[i] = 0;
10214		}
10215		for (i = 0; i < NIADDR; i++) {
10216#ifdef INVARIANTS
10217			if (dp->di_ib[i] != 0 &&
10218			    (deplist & ((1 << NDADDR) << i)) == 0)
10219				panic("softdep_write_inodeblock: lost dep2");
10220#endif /* INVARIANTS */
10221			dp->di_ib[i] = 0;
10222		}
10223		return;
10224	}
10225	/*
	 * If we have zeroed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
10230	 */
10231	if (lastadp != NULL &&
10232	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10233		for (i = lastadp->ad_offset; i >= 0; i--)
10234			if (dp->di_db[i] != 0)
10235				break;
10236		dp->di_size = (i + 1) * fs->fs_bsize;
10237	}
10238	/*
10239	 * The only dependencies are for indirect blocks.
10240	 *
10241	 * The file size for indirect block additions is not guaranteed.
10242	 * Such a guarantee would be non-trivial to achieve. The conventional
10243	 * synchronous write implementation also does not make this guarantee.
10244	 * Fsck should catch and fix discrepancies. Arguably, the file size
10245	 * can be over-estimated without destroying integrity when the file
10246	 * moves into the indirect blocks (i.e., is large). If we want to
10247	 * postpone fsck, we are stuck with this argument.
10248	 */
10249	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10250		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10251}
10252
10253/*
10254 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
 * Note that any bug fixes made to this routine must also be made
 * to the version found above.
10257 *
10258 * Called from within the procedure above to deal with unsatisfied
10259 * allocation dependencies in an inodeblock. The buffer must be
10260 * locked, thus, no I/O completion operations can occur while we
10261 * are manipulating its associated dependencies.
10262 */
10263static void
10264initiate_write_inodeblock_ufs2(inodedep, bp)
10265	struct inodedep *inodedep;
10266	struct buf *bp;			/* The inode block */
10267{
10268	struct allocdirect *adp, *lastadp;
10269	struct ufs2_dinode *dp;
10270	struct ufs2_dinode *sip;
10271	struct inoref *inoref;
10272	struct ufsmount *ump;
10273	struct fs *fs;
10274	ufs_lbn_t i;
10275#ifdef INVARIANTS
10276	ufs_lbn_t prevlbn = 0;
10277#endif
10278	int deplist;
10279
10280	if (inodedep->id_state & IOSTARTED)
10281		panic("initiate_write_inodeblock_ufs2: already started");
10282	inodedep->id_state |= IOSTARTED;
10283	fs = inodedep->id_fs;
10284	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10285	LOCK_OWNED(ump);
10286	dp = (struct ufs2_dinode *)bp->b_data +
10287	    ino_to_fsbo(fs, inodedep->id_ino);
10288
10289	/*
	 * If we're on the unlinked list but have not yet written our
	 * next pointer, initialize it here.
10292	 */
10293	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10294		struct inodedep *inon;
10295
10296		inon = TAILQ_NEXT(inodedep, id_unlinked);
10297		dp->di_freelink = inon ? inon->id_ino : 0;
10298	}
10299	/*
10300	 * If the bitmap is not yet written, then the allocated
10301	 * inode cannot be written to disk.
10302	 */
10303	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10304		if (inodedep->id_savedino2 != NULL)
10305			panic("initiate_write_inodeblock_ufs2: I/O underway");
10306		FREE_LOCK(ump);
10307		sip = malloc(sizeof(struct ufs2_dinode),
10308		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10309		ACQUIRE_LOCK(ump);
10310		inodedep->id_savedino2 = sip;
10311		*inodedep->id_savedino2 = *dp;
10312		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
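		/*
		 * Preserve the generation number and the unlinked-list
		 * link; these fields must remain valid on disk even
		 * while the rest of the inode is rolled back.
		 */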
10313		dp->di_gen = inodedep->id_savedino2->di_gen;
10314		dp->di_freelink = inodedep->id_savedino2->di_freelink;
10315		return;
10316	}
10317	/*
10318	 * If no dependencies, then there is nothing to roll back.
10319	 */
10320	inodedep->id_savedsize = dp->di_size;
10321	inodedep->id_savedextsize = dp->di_extsize;
10322	inodedep->id_savednlink = dp->di_nlink;
10323	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10324	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
10325	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10326		return;
10327	/*
10328	 * Revert the link count to that of the first unwritten journal entry.
10329	 */
10330	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10331	if (inoref)
10332		dp->di_nlink = inoref->if_nlink;
10333
10334	/*
10335	 * Set the ext data dependencies to busy.
10336	 */
10337	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10338	     adp = TAILQ_NEXT(adp, ad_next)) {
10339#ifdef INVARIANTS
10340		if (deplist != 0 && prevlbn >= adp->ad_offset)
10341			panic("softdep_write_inodeblock: lbn order");
10342		prevlbn = adp->ad_offset;
10343		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10344			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10345			    "softdep_write_inodeblock",
10346			    (intmax_t)adp->ad_offset,
10347			    (intmax_t)dp->di_extb[adp->ad_offset],
10348			    (intmax_t)adp->ad_newblkno);
10349		deplist |= 1 << adp->ad_offset;
10350		if ((adp->ad_state & ATTACHED) == 0)
10351			panic("softdep_write_inodeblock: Unknown state 0x%x",
10352			    adp->ad_state);
10353#endif /* INVARIANTS */
10354		adp->ad_state &= ~ATTACHED;
10355		adp->ad_state |= UNDONE;
10356	}
10357	/*
10358	 * The on-disk inode cannot claim to be any larger than the last
10359	 * fragment that has been written. Otherwise, the on-disk inode
10360	 * might have fragments that were not the last block in the ext
10361	 * data which would corrupt the filesystem.
10362	 */
10363	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10364	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10365		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10366		/* keep going until hitting a rollback to a frag */
10367		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10368			continue;
10369		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10370		for (i = adp->ad_offset + 1; i < NXADDR; i++) {
10371#ifdef INVARIANTS
10372			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10373				panic("softdep_write_inodeblock: lost dep1");
10374#endif /* INVARIANTS */
10375			dp->di_extb[i] = 0;
10376		}
10377		lastadp = NULL;
10378		break;
10379	}
10380	/*
	 * If we have zeroed out the last allocated block of the ext
	 * data, roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
10385	 */
10386	if (lastadp != NULL &&
10387	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10388		for (i = lastadp->ad_offset; i >= 0; i--)
10389			if (dp->di_extb[i] != 0)
10390				break;
10391		dp->di_extsize = (i + 1) * fs->fs_bsize;
10392	}
10393	/*
10394	 * Set the file data dependencies to busy.
10395	 */
10396	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10397	     adp = TAILQ_NEXT(adp, ad_next)) {
10398#ifdef INVARIANTS
10399		if (deplist != 0 && prevlbn >= adp->ad_offset)
10400			panic("softdep_write_inodeblock: lbn order");
10401		if ((adp->ad_state & ATTACHED) == 0)
10402			panic("inodedep %p and adp %p not attached", inodedep, adp);
10403		prevlbn = adp->ad_offset;
10404		if (adp->ad_offset < NDADDR &&
10405		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10406			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10407			    "softdep_write_inodeblock",
10408			    (intmax_t)adp->ad_offset,
10409			    (intmax_t)dp->di_db[adp->ad_offset],
10410			    (intmax_t)adp->ad_newblkno);
10411		if (adp->ad_offset >= NDADDR &&
10412		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10413			panic("%s indirect pointer #%jd mismatch %jd != %jd",
10414			    "softdep_write_inodeblock:",
10415			    (intmax_t)adp->ad_offset - NDADDR,
10416			    (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
10417			    (intmax_t)adp->ad_newblkno);
10418		deplist |= 1 << adp->ad_offset;
10419		if ((adp->ad_state & ATTACHED) == 0)
10420			panic("softdep_write_inodeblock: Unknown state 0x%x",
10421			    adp->ad_state);
10422#endif /* INVARIANTS */
10423		adp->ad_state &= ~ATTACHED;
10424		adp->ad_state |= UNDONE;
10425	}
10426	/*
10427	 * The on-disk inode cannot claim to be any larger than the last
10428	 * fragment that has been written. Otherwise, the on-disk inode
10429	 * might have fragments that were not the last block in the file
10430	 * which would corrupt the filesystem.
10431	 */
10432	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10433	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10434		if (adp->ad_offset >= NDADDR)
10435			break;
10436		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10437		/* keep going until hitting a rollback to a frag */
10438		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10439			continue;
10440		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10441		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10442#ifdef INVARIANTS
10443			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10444				panic("softdep_write_inodeblock: lost dep2");
10445#endif /* INVARIANTS */
10446			dp->di_db[i] = 0;
10447		}
10448		for (i = 0; i < NIADDR; i++) {
10449#ifdef INVARIANTS
10450			if (dp->di_ib[i] != 0 &&
10451			    (deplist & ((1 << NDADDR) << i)) == 0)
10452				panic("softdep_write_inodeblock: lost dep3");
10453#endif /* INVARIANTS */
10454			dp->di_ib[i] = 0;
10455		}
10456		return;
10457	}
10458	/*
	 * If we have zeroed out the last allocated block of the file,
	 * roll back the size to the last currently allocated block.
	 * We know that this last allocated block is full-sized, as
	 * we already checked for fragments in the loop above.
10463	 */
10464	if (lastadp != NULL &&
10465	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10466		for (i = lastadp->ad_offset; i >= 0; i--)
10467			if (dp->di_db[i] != 0)
10468				break;
10469		dp->di_size = (i + 1) * fs->fs_bsize;
10470	}
10471	/*
10472	 * The only dependencies are for indirect blocks.
10473	 *
10474	 * The file size for indirect block additions is not guaranteed.
10475	 * Such a guarantee would be non-trivial to achieve. The conventional
10476	 * synchronous write implementation also does not make this guarantee.
10477	 * Fsck should catch and fix discrepancies. Arguably, the file size
10478	 * can be over-estimated without destroying integrity when the file
10479	 * moves into the indirect blocks (i.e., is large). If we want to
10480	 * postpone fsck, we are stuck with this argument.
10481	 */
10482	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10483		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10484}
10485
10486/*
10487 * Cancel an indirdep as a result of truncation.  Release all of the
10488 * children allocindirs and place their journal work on the appropriate
10489 * list.
10490 */
10491static void
10492cancel_indirdep(indirdep, bp, freeblks)
10493	struct indirdep *indirdep;
10494	struct buf *bp;
10495	struct freeblks *freeblks;
10496{
10497	struct allocindir *aip;
10498
10499	/*
10500	 * None of the indirect pointers will ever be visible,
10501	 * so they can simply be tossed. GOINGAWAY ensures
10502	 * that allocated pointers will be saved in the buffer
10503	 * cache until they are freed. Note that they will
10504	 * only be able to be found by their physical address
10505	 * since the inode mapping the logical address will
10506	 * be gone. The save buffer used for the safe copy
10507	 * was allocated in setup_allocindir_phase2 using
10508	 * the physical address so it could be used for this
10509	 * purpose. Hence we swap the safe copy with the real
10510	 * copy, allowing the safe copy to be freed and holding
10511	 * on to the real copy for later use in indir_trunc.
10512	 */
10513	if (indirdep->ir_state & GOINGAWAY)
10514		panic("cancel_indirdep: already gone");
10515	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10516		indirdep->ir_state |= DEPCOMPLETE;
10517		LIST_REMOVE(indirdep, ir_next);
10518	}
10519	indirdep->ir_state |= GOINGAWAY;
10520	/*
	 * Pass in bp for blocks that still have journal writes
	 * pending so we can cancel them individually.
10523	 */
	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
		cancel_allocindir(aip, bp, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
		cancel_allocindir(aip, NULL, freeblks, 0);
10532	/*
10533	 * If there are pending partial truncations we need to keep the
10534	 * old block copy around until they complete.  This is because
10535	 * the current b_data is not a perfect superset of the available
10536	 * blocks.
10537	 */
10538	if (TAILQ_EMPTY(&indirdep->ir_trunc))
10539		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10540	else
10541		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10542	WORKLIST_REMOVE(&indirdep->ir_list);
10543	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10544	indirdep->ir_bp = NULL;
10545	indirdep->ir_freeblks = freeblks;
10546}
10547
10548/*
10549 * Free an indirdep once it no longer has new pointers to track.
10550 */
10551static void
10552free_indirdep(indirdep)
10553	struct indirdep *indirdep;
10554{
10555
10556	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10557	    ("free_indirdep: Indir trunc list not empty."));
10558	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10559	    ("free_indirdep: Complete head not empty."));
10560	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10561	    ("free_indirdep: write head not empty."));
10562	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10563	    ("free_indirdep: done head not empty."));
10564	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10565	    ("free_indirdep: deplist head not empty."));
10566	KASSERT((indirdep->ir_state & DEPCOMPLETE),
10567	    ("free_indirdep: %p still on newblk list.", indirdep));
10568	KASSERT(indirdep->ir_saveddata == NULL,
10569	    ("free_indirdep: %p still has saved data.", indirdep));
10570	if (indirdep->ir_state & ONWORKLIST)
10571		WORKLIST_REMOVE(&indirdep->ir_list);
10572	WORKITEM_FREE(indirdep, D_INDIRDEP);
10573}
10574
10575/*
10576 * Called before a write to an indirdep.  This routine is responsible for
10577 * rolling back pointers to a safe state which includes only those
10578 * allocindirs which have been completed.
10579 */
10580static void
10581initiate_write_indirdep(indirdep, bp)
10582	struct indirdep *indirdep;
10583	struct buf *bp;
10584{
10585	struct ufsmount *ump;
10586
10587	indirdep->ir_state |= IOSTARTED;
10588	if (indirdep->ir_state & GOINGAWAY)
10589		panic("disk_io_initiation: indirdep gone");
10590	/*
10591	 * If there are no remaining dependencies, this will be writing
10592	 * the real pointers.
10593	 */
10594	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10595	    TAILQ_EMPTY(&indirdep->ir_trunc))
10596		return;
10597	/*
10598	 * Replace up-to-date version with safe version.
10599	 */
10600	if (indirdep->ir_saveddata == NULL) {
10601		ump = VFSTOUFS(indirdep->ir_list.wk_mp);
10602		LOCK_OWNED(ump);
10603		FREE_LOCK(ump);
10604		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10605		    M_SOFTDEP_FLAGS);
10606		ACQUIRE_LOCK(ump);
10607	}
10608	indirdep->ir_state &= ~ATTACHED;
10609	indirdep->ir_state |= UNDONE;
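	/*
	 * Stash the up-to-date contents in ir_saveddata and write out
	 * the safe copy, which holds only completed pointers.
	 */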
10610	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10611	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10612	    bp->b_bcount);
10613}
10614
10615/*
10616 * Called when an inode has been cleared in a cg bitmap.  This finally
10617 * eliminates any canceled jaddrefs
10618 */
10619void
10620softdep_setup_inofree(mp, bp, ino, wkhd)
10621	struct mount *mp;
10622	struct buf *bp;
10623	ino_t ino;
10624	struct workhead *wkhd;
10625{
10626	struct worklist *wk, *wkn;
10627	struct inodedep *inodedep;
10628	struct ufsmount *ump;
10629	uint8_t *inosused;
10630	struct cg *cgp;
10631	struct fs *fs;
10632
10633	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
10634	    ("softdep_setup_inofree called on non-softdep filesystem"));
10635	ump = VFSTOUFS(mp);
10636	ACQUIRE_LOCK(ump);
10637	fs = ump->um_fs;
10638	cgp = (struct cg *)bp->b_data;
10639	inosused = cg_inosused(cgp);
10640	if (isset(inosused, ino % fs->fs_ipg))
10641		panic("softdep_setup_inofree: inode %ju not freed.",
10642		    (uintmax_t)ino);
10643	if (inodedep_lookup(mp, ino, 0, &inodedep))
10644		panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
10645		    (uintmax_t)ino, inodedep);
10646	if (wkhd) {
10647		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10648			if (wk->wk_type != D_JADDREF)
10649				continue;
10650			WORKLIST_REMOVE(wk);
10651			/*
10652			 * We can free immediately even if the jaddref
10653			 * isn't attached in a background write as now
10654			 * the bitmaps are reconciled.
10655			 */
10656			wk->wk_state |= COMPLETE | ATTACHED;
10657			free_jaddref(WK_JADDREF(wk));
10658		}
10659		jwork_move(&bp->b_dep, wkhd);
10660	}
10661	FREE_LOCK(ump);
10662}
10665/*
10666 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10667 * map.  Any dependencies waiting for the write to clear are added to the
10668 * buf's list and any jnewblks that are being canceled are discarded
10669 * immediately.
10670 */
10671void
10672softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
10673	struct mount *mp;
10674	struct buf *bp;
10675	ufs2_daddr_t blkno;
10676	int frags;
10677	struct workhead *wkhd;
10678{
10679	struct bmsafemap *bmsafemap;
10680	struct jnewblk *jnewblk;
10681	struct ufsmount *ump;
10682	struct worklist *wk;
10683	struct fs *fs;
10684#ifdef SUJ_DEBUG
10685	uint8_t *blksfree;
10686	struct cg *cgp;
10687	ufs2_daddr_t jstart;
10688	ufs2_daddr_t jend;
10689	ufs2_daddr_t end;
10690	long bno;
10691	int i;
10692#endif
10693
10694	CTR3(KTR_SUJ,
10695	    "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10696	    blkno, frags, wkhd);
10697
10698	ump = VFSTOUFS(mp);
10699	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
10700	    ("softdep_setup_blkfree called on non-softdep filesystem"));
10701	ACQUIRE_LOCK(ump);
10702	/* Lookup the bmsafemap so we track when it is dirty. */
10703	fs = ump->um_fs;
10704	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10705	/*
10706	 * Detach any jnewblks which have been canceled.  They must linger
10707	 * until the bitmap is cleared again by ffs_blkfree() to prevent
10708	 * an unjournaled allocation from hitting the disk.
10709	 */
10710	if (wkhd) {
10711		while ((wk = LIST_FIRST(wkhd)) != NULL) {
10712			CTR2(KTR_SUJ,
10713			    "softdep_setup_blkfree: blkno %jd wk type %d",
10714			    blkno, wk->wk_type);
10715			WORKLIST_REMOVE(wk);
10716			if (wk->wk_type != D_JNEWBLK) {
10717				WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
10718				continue;
10719			}
10720			jnewblk = WK_JNEWBLK(wk);
10721			KASSERT(jnewblk->jn_state & GOINGAWAY,
10722			    ("softdep_setup_blkfree: jnewblk not canceled."));
10723#ifdef SUJ_DEBUG
10724			/*
10725			 * Assert that this block is free in the bitmap
10726			 * before we discard the jnewblk.
10727			 */
10728			cgp = (struct cg *)bp->b_data;
10729			blksfree = cg_blksfree(cgp);
10730			bno = dtogd(fs, jnewblk->jn_blkno);
10731			for (i = jnewblk->jn_oldfrags;
10732			    i < jnewblk->jn_frags; i++) {
10733				if (isset(blksfree, bno + i))
10734					continue;
10735				panic("softdep_setup_blkfree: not free");
10736			}
10737#endif
10738			/*
10739			 * Even if it's not attached we can free immediately
10740			 * as the new bitmap is correct.
10741			 */
10742			wk->wk_state |= COMPLETE | ATTACHED;
10743			free_jnewblk(jnewblk);
10744		}
10745	}
10746
10747#ifdef SUJ_DEBUG
10748	/*
10749	 * Assert that we are not freeing a block which has an outstanding
10750	 * allocation dependency.
10751	 */
10752	fs = VFSTOUFS(mp)->um_fs;
10753	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10754	end = blkno + frags;
10755	LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10756		/*
10757		 * Don't match against blocks that will be freed when the
10758		 * background write is done.
10759		 */
10760		if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
10761		    (COMPLETE | DEPCOMPLETE))
10762			continue;
10763		jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
10764		jend = jnewblk->jn_blkno + jnewblk->jn_frags;
10765		if ((blkno >= jstart && blkno < jend) ||
10766		    (end > jstart && end <= jend)) {
10767			printf("state 0x%X %jd - %d %d dep %p\n",
10768			    jnewblk->jn_state, jnewblk->jn_blkno,
10769			    jnewblk->jn_oldfrags, jnewblk->jn_frags,
10770			    jnewblk->jn_dep);
10771			panic("softdep_setup_blkfree: "
10772			    "%jd-%jd(%d) overlaps with %jd-%jd",
10773			    blkno, end, frags, jstart, jend);
10774		}
10775	}
10776#endif
10777	FREE_LOCK(ump);
10778}
10779
10780/*
10781 * Revert a block allocation when the journal record that describes it
10782 * is not yet written.
10783 */
10784static int
10785jnewblk_rollback(jnewblk, fs, cgp, blksfree)
10786	struct jnewblk *jnewblk;
10787	struct fs *fs;
10788	struct cg *cgp;
10789	uint8_t *blksfree;
10790{
10791	ufs1_daddr_t fragno;
10792	long cgbno, bbase;
10793	int frags, blk;
10794	int i;
10795
10796	frags = 0;
10797	cgbno = dtogd(fs, jnewblk->jn_blkno);
10798	/*
10799	 * We have to test which frags need to be rolled back.  We may
10800	 * be operating on a stale copy when doing background writes.
10801	 */
10802	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
10803		if (isclr(blksfree, cgbno + i))
10804			frags++;
10805	if (frags == 0)
10806		return (0);
10807	/*
10808	 * This is mostly ffs_blkfree() sans some validation and
10809	 * superblock updates.
10810	 */
10811	if (frags == fs->fs_frag) {
10812		fragno = fragstoblks(fs, cgbno);
10813		ffs_setblock(fs, blksfree, fragno);
10814		ffs_clusteracct(fs, cgp, fragno, 1);
10815		cgp->cg_cs.cs_nbfree++;
10816	} else {
10817		cgbno += jnewblk->jn_oldfrags;
10818		bbase = cgbno - fragnum(fs, cgbno);
10819		/* Decrement the old frags.  */
10820		blk = blkmap(fs, blksfree, bbase);
10821		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
10822		/* Deallocate the fragment */
10823		for (i = 0; i < frags; i++)
10824			setbit(blksfree, cgbno + i);
10825		cgp->cg_cs.cs_nffree += frags;
10826		/* Add back in counts associated with the new frags */
10827		blk = blkmap(fs, blksfree, bbase);
10828		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
10829		/* If a complete block has been reassembled, account for it. */
10830		fragno = fragstoblks(fs, bbase);
10831		if (ffs_isblock(fs, blksfree, fragno)) {
10832			cgp->cg_cs.cs_nffree -= fs->fs_frag;
10833			ffs_clusteracct(fs, cgp, fragno, 1);
10834			cgp->cg_cs.cs_nbfree++;
10835		}
10836	}
10837	stat_jnewblk++;
10838	jnewblk->jn_state &= ~ATTACHED;
10839	jnewblk->jn_state |= UNDONE;
10840
10841	return (frags);
10842}
10843
10844static void
10845initiate_write_bmsafemap(bmsafemap, bp)
10846	struct bmsafemap *bmsafemap;
10847	struct buf *bp;			/* The cg block. */
10848{
10849	struct jaddref *jaddref;
10850	struct jnewblk *jnewblk;
10851	uint8_t *inosused;
10852	uint8_t *blksfree;
10853	struct cg *cgp;
10854	struct fs *fs;
10855	ino_t ino;
10856
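	/*
	 * If a write of this cg is already in flight, the rollbacks
	 * below have already been applied.
	 */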
10857	if (bmsafemap->sm_state & IOSTARTED)
10858		return;
10859	bmsafemap->sm_state |= IOSTARTED;
10860	/*
10861	 * Clear any inode allocations which are pending journal writes.
10862	 */
10863	if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
10864		cgp = (struct cg *)bp->b_data;
10865		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10866		inosused = cg_inosused(cgp);
10867		LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
10868			ino = jaddref->ja_ino % fs->fs_ipg;
10869			if (isset(inosused, ino)) {
10870				if ((jaddref->ja_mode & IFMT) == IFDIR)
10871					cgp->cg_cs.cs_ndir--;
10872				cgp->cg_cs.cs_nifree++;
10873				clrbit(inosused, ino);
10874				jaddref->ja_state &= ~ATTACHED;
10875				jaddref->ja_state |= UNDONE;
10876				stat_jaddref++;
10877			} else
10878				panic("initiate_write_bmsafemap: inode %ju "
10879				    "marked free", (uintmax_t)jaddref->ja_ino);
10880		}
10881	}
10882	/*
10883	 * Clear any block allocations which are pending journal writes.
10884	 */
10885	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
10886		cgp = (struct cg *)bp->b_data;
10887		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10888		blksfree = cg_blksfree(cgp);
10889		LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10890			if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
10891				continue;
10892			panic("initiate_write_bmsafemap: block %jd "
10893			    "marked free", jnewblk->jn_blkno);
10894		}
10895	}
10896	/*
10897	 * Move allocation lists to the written lists so they can be
10898	 * cleared once the block write is complete.
10899	 */
10900	LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
10901	    inodedep, id_deps);
10902	LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
10903	    newblk, nb_deps);
10904	LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
10905	    wk_list);
10906}
10907
10908/*
10909 * This routine is called during the completion interrupt
10910 * service routine for a disk write (from the procedure called
10911 * by the device driver to inform the filesystem caches of
10912 * a request completion).  It should be called early in this
10913 * procedure, before the block is made available to other
10914 * processes or other routines are called.
10915 *
10916 */
10917static void
10918softdep_disk_write_complete(bp)
10919	struct buf *bp;		/* describes the completed disk write */
10920{
10921	struct worklist *wk;
10922	struct worklist *owk;
10923	struct ufsmount *ump;
10924	struct workhead reattach;
10925	struct freeblks *freeblks;
10926	struct buf *sbp;
10927
10928	/*
10929	 * If an error occurred while doing the write, then the data
10930	 * has not hit the disk and the dependencies cannot be unrolled.
10931	 */
10932	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
10933		return;
10934	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
10935		return;
10936	ump = VFSTOUFS(wk->wk_mp);
10937	LIST_INIT(&reattach);
10938	/*
10939	 * This lock must not be released anywhere in this code segment.
10940	 */
10941	sbp = NULL;
10942	owk = NULL;
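	/*
	 * owk remembers the previously processed item; if an item
	 * reappears immediately after being removed, the dependency
	 * list is corrupted and we panic rather than loop forever.
	 */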
10943	ACQUIRE_LOCK(ump);
10944	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
10945		WORKLIST_REMOVE(wk);
10946		atomic_add_long(&dep_write[wk->wk_type], 1);
10947		if (wk == owk)
10948			panic("duplicate worklist: %p\n", wk);
10949		owk = wk;
10950		switch (wk->wk_type) {
10951
10952		case D_PAGEDEP:
10953			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
10954				WORKLIST_INSERT(&reattach, wk);
10955			continue;
10956
10957		case D_INODEDEP:
10958			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
10959				WORKLIST_INSERT(&reattach, wk);
10960			continue;
10961
10962		case D_BMSAFEMAP:
10963			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
10964				WORKLIST_INSERT(&reattach, wk);
10965			continue;
10966
10967		case D_MKDIR:
10968			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
10969			continue;
10970
10971		case D_ALLOCDIRECT:
10972			wk->wk_state |= COMPLETE;
10973			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
10974			continue;
10975
10976		case D_ALLOCINDIR:
10977			wk->wk_state |= COMPLETE;
10978			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
10979			continue;
10980
10981		case D_INDIRDEP:
10982			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
10983				WORKLIST_INSERT(&reattach, wk);
10984			continue;
10985
10986		case D_FREEBLKS:
10987			wk->wk_state |= COMPLETE;
10988			freeblks = WK_FREEBLKS(wk);
10989			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
10990			    LIST_EMPTY(&freeblks->fb_jblkdephd))
10991				add_to_worklist(wk, WK_NODELAY);
10992			continue;
10993
10994		case D_FREEWORK:
10995			handle_written_freework(WK_FREEWORK(wk));
10996			break;
10997
10998		case D_JSEGDEP:
10999			free_jsegdep(WK_JSEGDEP(wk));
11000			continue;
11001
11002		case D_JSEG:
11003			handle_written_jseg(WK_JSEG(wk), bp);
11004			continue;
11005
11006		case D_SBDEP:
11007			if (handle_written_sbdep(WK_SBDEP(wk), bp))
11008				WORKLIST_INSERT(&reattach, wk);
11009			continue;
11010
11011		case D_FREEDEP:
11012			free_freedep(WK_FREEDEP(wk));
11013			continue;
11014
11015		default:
11016			panic("handle_disk_write_complete: Unknown type %s",
11017			    TYPENAME(wk->wk_type));
11018			/* NOTREACHED */
11019		}
11020	}
11021	/*
11022	 * Reattach any requests that must be redone.
11023	 */
11024	while ((wk = LIST_FIRST(&reattach)) != NULL) {
11025		WORKLIST_REMOVE(wk);
11026		WORKLIST_INSERT(&bp->b_dep, wk);
11027	}
11028	FREE_LOCK(ump);
11029	if (sbp)
11030		brelse(sbp);
11031}
11032
11033/*
11034 * Called from within softdep_disk_write_complete above. Note that
11035 * this routine is always called from interrupt level with further
11036 * splbio interrupts blocked.
11037 */
11038static void
11039handle_allocdirect_partdone(adp, wkhd)
11040	struct allocdirect *adp;	/* the completed allocdirect */
	struct workhead *wkhd;		/* Work to do when inode is written. */
11042{
11043	struct allocdirectlst *listhead;
11044	struct allocdirect *listadp;
11045	struct inodedep *inodedep;
11046	long bsize;
11047
11048	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11049		return;
11050	/*
11051	 * The on-disk inode cannot claim to be any larger than the last
11052	 * fragment that has been written. Otherwise, the on-disk inode
11053	 * might have fragments that were not the last block in the file
11054	 * which would corrupt the filesystem. Thus, we cannot free any
11055	 * allocdirects after one whose ad_oldblkno claims a fragment as
11056	 * these blocks must be rolled back to zero before writing the inode.
11057	 * We check the currently active set of allocdirects in id_inoupdt
11058	 * or id_extupdt as appropriate.
11059	 */
11060	inodedep = adp->ad_inodedep;
11061	bsize = inodedep->id_fs->fs_bsize;
11062	if (adp->ad_state & EXTDATA)
11063		listhead = &inodedep->id_extupdt;
11064	else
11065		listhead = &inodedep->id_inoupdt;
11066	TAILQ_FOREACH(listadp, listhead, ad_next) {
11067		/* found our block */
11068		if (listadp == adp)
11069			break;
		/* continue if the old block is not a fragment */
11071		if (listadp->ad_oldsize == 0 ||
11072		    listadp->ad_oldsize == bsize)
11073			continue;
11074		/* hit a fragment */
11075		return;
11076	}
11077	/*
11078	 * If we have reached the end of the current list without
11079	 * finding the just finished dependency, then it must be
11080	 * on the future dependency list. Future dependencies cannot
11081	 * be freed until they are moved to the current list.
11082	 */
11083	if (listadp == NULL) {
11084#ifdef DEBUG
11085		if (adp->ad_state & EXTDATA)
11086			listhead = &inodedep->id_newextupdt;
11087		else
11088			listhead = &inodedep->id_newinoupdt;
11089		TAILQ_FOREACH(listadp, listhead, ad_next)
11090			/* found our block */
11091			if (listadp == adp)
11092				break;
11093		if (listadp == NULL)
11094			panic("handle_allocdirect_partdone: lost dep");
11095#endif /* DEBUG */
11096		return;
11097	}
11098	/*
11099	 * If we have found the just finished dependency, then queue
11100	 * it along with anything that follows it that is complete.
11101	 * Since the pointer has not yet been written in the inode
11102	 * as the dependency prevents it, place the allocdirect on the
11103	 * bufwait list where it will be freed once the pointer is
11104	 * valid.
11105	 */
11106	if (wkhd == NULL)
11107		wkhd = &inodedep->id_bufwait;
11108	for (; adp; adp = listadp) {
11109		listadp = TAILQ_NEXT(adp, ad_next);
11110		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11111			return;
11112		TAILQ_REMOVE(listhead, adp, ad_next);
11113		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
11114	}
11115}
11116
11117/*
11118 * Called from within softdep_disk_write_complete above.  This routine
11119 * completes successfully written allocindirs.
11120 */
11121static void
11122handle_allocindir_partdone(aip)
11123	struct allocindir *aip;		/* the completed allocindir */
11124{
11125	struct indirdep *indirdep;
11126
11127	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
11128		return;
11129	indirdep = aip->ai_indirdep;
11130	LIST_REMOVE(aip, ai_next);
11131	/*
11132	 * Don't set a pointer while the buffer is undergoing IO or while
11133	 * we have active truncations.
11134	 */
11135	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
11136		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
11137		return;
11138	}
11139	if (indirdep->ir_state & UFS1FMT)
11140		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11141		    aip->ai_newblkno;
11142	else
11143		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11144		    aip->ai_newblkno;
11145	/*
11146	 * Await the pointer write before freeing the allocindir.
11147	 */
11148	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
11149}
11150
11151/*
11152 * Release segments held on a jwork list.
11153 */
11154static void
11155handle_jwork(wkhd)
11156	struct workhead *wkhd;
11157{
11158	struct worklist *wk;
11159
11160	while ((wk = LIST_FIRST(wkhd)) != NULL) {
11161		WORKLIST_REMOVE(wk);
11162		switch (wk->wk_type) {
11163		case D_JSEGDEP:
11164			free_jsegdep(WK_JSEGDEP(wk));
11165			continue;
11166		case D_FREEDEP:
11167			free_freedep(WK_FREEDEP(wk));
11168			continue;
11169		case D_FREEFRAG:
11170			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
11171			WORKITEM_FREE(wk, D_FREEFRAG);
11172			continue;
11173		case D_FREEWORK:
11174			handle_written_freework(WK_FREEWORK(wk));
11175			continue;
11176		default:
11177			panic("handle_jwork: Unknown type %s\n",
11178			    TYPENAME(wk->wk_type));
11179		}
11180	}
11181}
11182
11183/*
11184 * Handle the bufwait list on an inode when it is safe to release items
11185 * held there.  This normally happens after an inode block is written but
11186 * may be delayed and handled later if there are pending journal items that
11187 * are not yet safe to be released.
11188 */
11189static struct freefile *
11190handle_bufwait(inodedep, refhd)
11191	struct inodedep *inodedep;
11192	struct workhead *refhd;
11193{
11194	struct jaddref *jaddref;
11195	struct freefile *freefile;
11196	struct worklist *wk;
11197
11198	freefile = NULL;
11199	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
11200		WORKLIST_REMOVE(wk);
11201		switch (wk->wk_type) {
11202		case D_FREEFILE:
11203			/*
11204			 * We defer adding freefile to the worklist
11205			 * until all other additions have been made to
11206			 * ensure that it will be done after all the
11207			 * old blocks have been freed.
11208			 */
11209			if (freefile != NULL)
11210				panic("handle_bufwait: freefile");
11211			freefile = WK_FREEFILE(wk);
11212			continue;
11213
11214		case D_MKDIR:
11215			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
11216			continue;
11217
11218		case D_DIRADD:
11219			diradd_inode_written(WK_DIRADD(wk), inodedep);
11220			continue;
11221
11222		case D_FREEFRAG:
11223			wk->wk_state |= COMPLETE;
11224			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
11225				add_to_worklist(wk, 0);
11226			continue;
11227
11228		case D_DIRREM:
11229			wk->wk_state |= COMPLETE;
11230			add_to_worklist(wk, 0);
11231			continue;
11232
11233		case D_ALLOCDIRECT:
11234		case D_ALLOCINDIR:
11235			free_newblk(WK_NEWBLK(wk));
11236			continue;
11237
11238		case D_JNEWBLK:
11239			wk->wk_state |= COMPLETE;
11240			free_jnewblk(WK_JNEWBLK(wk));
11241			continue;
11242
11243		/*
11244		 * Save freed journal segments and add references on
11245		 * the supplied list which will delay their release
11246		 * until the cg bitmap is cleared on disk.
11247		 */
11248		case D_JSEGDEP:
11249			if (refhd == NULL)
11250				free_jsegdep(WK_JSEGDEP(wk));
11251			else
11252				WORKLIST_INSERT(refhd, wk);
11253			continue;
11254
11255		case D_JADDREF:
11256			jaddref = WK_JADDREF(wk);
11257			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11258			    if_deps);
11259			/*
11260			 * Transfer any jaddrefs to the list to be freed with
11261			 * the bitmap if we're handling a removed file.
11262			 */
11263			if (refhd == NULL) {
11264				wk->wk_state |= COMPLETE;
11265				free_jaddref(jaddref);
11266			} else
11267				WORKLIST_INSERT(refhd, wk);
11268			continue;
11269
11270		default:
11271			panic("handle_bufwait: Unknown type %p(%s)",
11272			    wk, TYPENAME(wk->wk_type));
11273			/* NOTREACHED */
11274		}
11275	}
11276	return (freefile);
11277}
11278/*
11279 * Called from within softdep_disk_write_complete above to restore
11280 * in-memory inode block contents to their most up-to-date state. Note
11281 * that this routine is always called from interrupt level with further
11282 * splbio interrupts blocked.
11283 */
11284static int
11285handle_written_inodeblock(inodedep, bp)
11286	struct inodedep *inodedep;
11287	struct buf *bp;		/* buffer containing the inode block */
11288{
11289	struct freefile *freefile;
11290	struct allocdirect *adp, *nextadp;
11291	struct ufs1_dinode *dp1 = NULL;
11292	struct ufs2_dinode *dp2 = NULL;
11293	struct workhead wkhd;
11294	int hadchanges, fstype;
11295	ino_t freelink;
11296
11297	LIST_INIT(&wkhd);
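	/*
	 * wkhd collects completed dependencies that must await the
	 * write of the rolled-forward pointers in this inode block.
	 */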
11298	hadchanges = 0;
11299	freefile = NULL;
11300	if ((inodedep->id_state & IOSTARTED) == 0)
11301		panic("handle_written_inodeblock: not started");
11302	inodedep->id_state &= ~IOSTARTED;
11303	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11304		fstype = UFS1;
11305		dp1 = (struct ufs1_dinode *)bp->b_data +
11306		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11307		freelink = dp1->di_freelink;
11308	} else {
11309		fstype = UFS2;
11310		dp2 = (struct ufs2_dinode *)bp->b_data +
11311		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11312		freelink = dp2->di_freelink;
11313	}
11314	/*
	 * If the freelink we just wrote matches the in-memory successor,
	 * then the on-disk unlinked-list next pointer is now valid.
	 * Leave this inodeblock dirty until it's in the list.
11316	 */
11317	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
11318		struct inodedep *inon;
11319
11320		inon = TAILQ_NEXT(inodedep, id_unlinked);
11321		if ((inon == NULL && freelink == 0) ||
11322		    (inon && inon->id_ino == freelink)) {
11323			if (inon)
11324				inon->id_state |= UNLINKPREV;
11325			inodedep->id_state |= UNLINKNEXT;
11326		}
11327		hadchanges = 1;
11328	}
11329	/*
11330	 * If we had to rollback the inode allocation because of
11331	 * bitmaps being incomplete, then simply restore it.
11332	 * Keep the block dirty so that it will not be reclaimed until
11333	 * all associated dependencies have been cleared and the
11334	 * corresponding updates written to disk.
11335	 */
11336	if (inodedep->id_savedino1 != NULL) {
11337		hadchanges = 1;
11338		if (fstype == UFS1)
11339			*dp1 = *inodedep->id_savedino1;
11340		else
11341			*dp2 = *inodedep->id_savedino2;
11342		free(inodedep->id_savedino1, M_SAVEDINO);
11343		inodedep->id_savedino1 = NULL;
11344		if ((bp->b_flags & B_DELWRI) == 0)
11345			stat_inode_bitmap++;
11346		bdirty(bp);
11347		/*
11348		 * If the inode is clear here and GOINGAWAY it will never
11349		 * be written.  Process the bufwait and clear any pending
11350		 * work which may include the freefile.
11351		 */
11352		if (inodedep->id_state & GOINGAWAY)
11353			goto bufwait;
11354		return (1);
11355	}
11356	inodedep->id_state |= COMPLETE;
11357	/*
11358	 * Roll forward anything that had to be rolled back before
11359	 * the inode could be updated.
11360	 */
11361	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11362		nextadp = TAILQ_NEXT(adp, ad_next);
11363		if (adp->ad_state & ATTACHED)
11364			panic("handle_written_inodeblock: new entry");
11365		if (fstype == UFS1) {
11366			if (adp->ad_offset < NDADDR) {
11367				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11368					panic("%s %s #%jd mismatch %d != %jd",
11369					    "handle_written_inodeblock:",
11370					    "direct pointer",
11371					    (intmax_t)adp->ad_offset,
11372					    dp1->di_db[adp->ad_offset],
11373					    (intmax_t)adp->ad_oldblkno);
11374				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11375			} else {
11376				if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
11377					panic("%s: %s #%jd allocated as %d",
11378					    "handle_written_inodeblock",
11379					    "indirect pointer",
11380					    (intmax_t)adp->ad_offset - NDADDR,
11381					    dp1->di_ib[adp->ad_offset - NDADDR]);
11382				dp1->di_ib[adp->ad_offset - NDADDR] =
11383				    adp->ad_newblkno;
11384			}
11385		} else {
11386			if (adp->ad_offset < NDADDR) {
11387				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11388					panic("%s: %s #%jd %s %jd != %jd",
11389					    "handle_written_inodeblock",
11390					    "direct pointer",
11391					    (intmax_t)adp->ad_offset, "mismatch",
11392					    (intmax_t)dp2->di_db[adp->ad_offset],
11393					    (intmax_t)adp->ad_oldblkno);
11394				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11395			} else {
11396				if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
11397					panic("%s: %s #%jd allocated as %jd",
11398					    "handle_written_inodeblock",
11399					    "indirect pointer",
11400					    (intmax_t)adp->ad_offset - NDADDR,
11401					    (intmax_t)
11402					    dp2->di_ib[adp->ad_offset - NDADDR]);
11403				dp2->di_ib[adp->ad_offset - NDADDR] =
11404				    adp->ad_newblkno;
11405			}
11406		}
11407		adp->ad_state &= ~UNDONE;
11408		adp->ad_state |= ATTACHED;
11409		hadchanges = 1;
11410	}
11411	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11412		nextadp = TAILQ_NEXT(adp, ad_next);
11413		if (adp->ad_state & ATTACHED)
11414			panic("handle_written_inodeblock: new entry");
11415		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11416			panic("%s: direct pointers #%jd %s %jd != %jd",
11417			    "handle_written_inodeblock",
11418			    (intmax_t)adp->ad_offset, "mismatch",
11419			    (intmax_t)dp2->di_extb[adp->ad_offset],
11420			    (intmax_t)adp->ad_oldblkno);
11421		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11422		adp->ad_state &= ~UNDONE;
11423		adp->ad_state |= ATTACHED;
11424		hadchanges = 1;
11425	}
11426	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11427		stat_direct_blk_ptrs++;
11428	/*
11429	 * Reset the file size to its most up-to-date value.
11430	 */
11431	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11432		panic("handle_written_inodeblock: bad size");
11433	if (inodedep->id_savednlink > LINK_MAX)
11434		panic("handle_written_inodeblock: Invalid link count "
11435		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
11436	if (fstype == UFS1) {
11437		if (dp1->di_nlink != inodedep->id_savednlink) {
11438			dp1->di_nlink = inodedep->id_savednlink;
11439			hadchanges = 1;
11440		}
11441		if (dp1->di_size != inodedep->id_savedsize) {
11442			dp1->di_size = inodedep->id_savedsize;
11443			hadchanges = 1;
11444		}
11445	} else {
11446		if (dp2->di_nlink != inodedep->id_savednlink) {
11447			dp2->di_nlink = inodedep->id_savednlink;
11448			hadchanges = 1;
11449		}
11450		if (dp2->di_size != inodedep->id_savedsize) {
11451			dp2->di_size = inodedep->id_savedsize;
11452			hadchanges = 1;
11453		}
11454		if (dp2->di_extsize != inodedep->id_savedextsize) {
11455			dp2->di_extsize = inodedep->id_savedextsize;
11456			hadchanges = 1;
11457		}
11458	}
11459	inodedep->id_savedsize = -1;
11460	inodedep->id_savedextsize = -1;
11461	inodedep->id_savednlink = -1;
11462	/*
11463	 * If there were any rollbacks in the inode block, then it must be
	 * marked dirty so that it will eventually get written back in
11465	 * its correct form.
11466	 */
11467	if (hadchanges)
11468		bdirty(bp);
11469bufwait:
11470	/*
11471	 * Process any allocdirects that completed during the update.
11472	 */
11473	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11474		handle_allocdirect_partdone(adp, &wkhd);
11475	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11476		handle_allocdirect_partdone(adp, &wkhd);
11477	/*
11478	 * Process deallocations that were held pending until the
11479	 * inode had been written to disk. Freeing of the inode
11480	 * is delayed until after all blocks have been freed to
11481	 * avoid creation of new <vfsid, inum, lbn> triples
11482	 * before the old ones have been deleted.  Completely
11483	 * unlinked inodes are not processed until the unlinked
11484	 * inode list is written or the last reference is removed.
11485	 */
11486	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11487		freefile = handle_bufwait(inodedep, NULL);
11488		if (freefile && !LIST_EMPTY(&wkhd)) {
11489			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11490			freefile = NULL;
11491		}
11492	}
11493	/*
11494	 * Move rolled forward dependency completions to the bufwait list
11495	 * now that those that were already written have been processed.
11496	 */
11497	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11498		panic("handle_written_inodeblock: bufwait but no changes");
11499	jwork_move(&inodedep->id_bufwait, &wkhd);
11500
11501	if (freefile != NULL) {
11502		/*
11503		 * If the inode is goingaway it was never written.  Fake up
11504		 * the state here so free_inodedep() can succeed.
11505		 */
11506		if (inodedep->id_state & GOINGAWAY)
11507			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11508		if (free_inodedep(inodedep) == 0)
11509			panic("handle_written_inodeblock: live inodedep %p",
11510			    inodedep);
11511		add_to_worklist(&freefile->fx_list, 0);
11512		return (0);
11513	}
11514
11515	/*
11516	 * If no outstanding dependencies, free it.
11517	 */
11518	if (free_inodedep(inodedep) ||
	    (TAILQ_EMPTY(&inodedep->id_inoreflst) &&
	     TAILQ_EMPTY(&inodedep->id_inoupdt) &&
	     TAILQ_EMPTY(&inodedep->id_extupdt) &&
	     LIST_EMPTY(&inodedep->id_bufwait)))
11523		return (0);
11524	return (hadchanges);
11525}
11526
11527static int
11528handle_written_indirdep(indirdep, bp, bpp)
11529	struct indirdep *indirdep;
11530	struct buf *bp;
11531	struct buf **bpp;
11532{
11533	struct allocindir *aip;
11534	struct buf *sbp;
11535	int chgs;
11536
11537	if (indirdep->ir_state & GOINGAWAY)
11538		panic("handle_written_indirdep: indirdep gone");
11539	if ((indirdep->ir_state & IOSTARTED) == 0)
11540		panic("handle_written_indirdep: IO not started");
11541	chgs = 0;
11542	/*
11543	 * If there were rollbacks revert them here.
11544	 */
11545	if (indirdep->ir_saveddata) {
11546		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11547		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11548			free(indirdep->ir_saveddata, M_INDIRDEP);
11549			indirdep->ir_saveddata = NULL;
11550		}
11551		chgs = 1;
11552	}
11553	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11554	indirdep->ir_state |= ATTACHED;
11555	/*
11556	 * Move allocindirs with written pointers to the completehd if
11557	 * the indirdep's pointer is not yet written.  Otherwise
11558	 * free them here.
11559	 */
	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) {
11561		LIST_REMOVE(aip, ai_next);
11562		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11563			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11564			    ai_next);
11565			newblk_freefrag(&aip->ai_block);
11566			continue;
11567		}
11568		free_newblk(&aip->ai_block);
11569	}
11570	/*
11571	 * Move allocindirs that have finished dependency processing from
11572	 * the done list to the write list after updating the pointers.
11573	 */
11574	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) {
11576			handle_allocindir_partdone(aip);
11577			if (aip == LIST_FIRST(&indirdep->ir_donehd))
11578				panic("disk_write_complete: not gone");
11579			chgs = 1;
11580		}
11581	}
11582	/*
11583	 * Preserve the indirdep if there were any changes or if it is not
11584	 * yet valid on disk.
11585	 */
11586	if (chgs) {
11587		stat_indir_blk_ptrs++;
11588		bdirty(bp);
11589		return (1);
11590	}
11591	/*
11592	 * If there were no changes we can discard the savedbp and detach
11593	 * ourselves from the buf.  We are only carrying completed pointers
11594	 * in this case.
11595	 */
11596	sbp = indirdep->ir_savebp;
11597	sbp->b_flags |= B_INVAL | B_NOCACHE;
11598	indirdep->ir_savebp = NULL;
11599	indirdep->ir_bp = NULL;
11600	if (*bpp != NULL)
11601		panic("handle_written_indirdep: bp already exists.");
11602	*bpp = sbp;
11603	/*
11604	 * The indirdep may not be freed until its parent points at it.
11605	 */
11606	if (indirdep->ir_state & DEPCOMPLETE)
11607		free_indirdep(indirdep);
11608
11609	return (0);
11610}
11611
11612/*
11613 * Process a diradd entry after its dependent inode has been written.
11614 * This routine must be called with splbio interrupts blocked.
11615 */
11616static void
11617diradd_inode_written(dap, inodedep)
11618	struct diradd *dap;
11619	struct inodedep *inodedep;
11620{
11621
11622	dap->da_state |= COMPLETE;
11623	complete_diradd(dap);
11624	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
11625}
11626
11627/*
11628 * Returns true if the bmsafemap will have rollbacks when written.  Must only
11629 * be called with the per-filesystem lock and the buf lock on the cg held.
11630 */
11631static int
11632bmsafemap_backgroundwrite(bmsafemap, bp)
11633	struct bmsafemap *bmsafemap;
11634	struct buf *bp;
11635{
11636	int dirty;
11637
11638	LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
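	/*
	 * Rollbacks are pending if any inode or block allocations are
	 * still waiting on journal writes.
	 */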
11639	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
11640	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
11641	/*
11642	 * If we're initiating a background write we need to process the
11643	 * rollbacks as they exist now, not as they exist when IO starts.
11644	 * No other consumers will look at the contents of the shadowed
11645	 * buf so this is safe to do here.
11646	 */
11647	if (bp->b_xflags & BX_BKGRDMARKER)
11648		initiate_write_bmsafemap(bmsafemap, bp);
11649
11650	return (dirty);
11651}
11652
11653/*
11654 * Re-apply an allocation when a cg write is complete.
11655 */
11656static int
11657jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
11658	struct jnewblk *jnewblk;
11659	struct fs *fs;
11660	struct cg *cgp;
11661	uint8_t *blksfree;
11662{
11663	ufs1_daddr_t fragno;
11664	ufs2_daddr_t blkno;
11665	long cgbno, bbase;
11666	int frags, blk;
11667	int i;
11668
11669	frags = 0;
11670	cgbno = dtogd(fs, jnewblk->jn_blkno);
11671	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
11672		if (isclr(blksfree, cgbno + i))
11673			panic("jnewblk_rollforward: re-allocated fragment");
11674		frags++;
11675	}
11676	if (frags == fs->fs_frag) {
11677		blkno = fragstoblks(fs, cgbno);
11678		ffs_clrblock(fs, blksfree, (long)blkno);
11679		ffs_clusteracct(fs, cgp, blkno, -1);
11680		cgp->cg_cs.cs_nbfree--;
11681	} else {
11682		bbase = cgbno - fragnum(fs, cgbno);
11683		cgbno += jnewblk->jn_oldfrags;
		/* If a complete block has been reassembled, account for it. */
11685		fragno = fragstoblks(fs, bbase);
11686		if (ffs_isblock(fs, blksfree, fragno)) {
11687			cgp->cg_cs.cs_nffree += fs->fs_frag;
11688			ffs_clusteracct(fs, cgp, fragno, -1);
11689			cgp->cg_cs.cs_nbfree--;
11690		}
11691		/* Decrement the old frags.  */
11692		blk = blkmap(fs, blksfree, bbase);
11693		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11694		/* Allocate the fragment */
11695		for (i = 0; i < frags; i++)
11696			clrbit(blksfree, cgbno + i);
11697		cgp->cg_cs.cs_nffree -= frags;
11698		/* Add back in counts associated with the new frags */
11699		blk = blkmap(fs, blksfree, bbase);
11700		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11701	}
11702	return (frags);
11703}
11704
11705/*
11706 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
11707 * changes if it's not a background write.  Set all written dependencies
11708 * to DEPCOMPLETE and free the structure if possible.
11709 */
11710static int
11711handle_written_bmsafemap(bmsafemap, bp)
11712	struct bmsafemap *bmsafemap;
11713	struct buf *bp;
11714{
11715	struct newblk *newblk;
11716	struct inodedep *inodedep;
11717	struct jaddref *jaddref, *jatmp;
11718	struct jnewblk *jnewblk, *jntmp;
11719	struct ufsmount *ump;
11720	uint8_t *inosused;
11721	uint8_t *blksfree;
11722	struct cg *cgp;
11723	struct fs *fs;
11724	ino_t ino;
11725	int foreground;
11726	int chgs;
11727
11728	if ((bmsafemap->sm_state & IOSTARTED) == 0)
11729		panic("initiate_write_bmsafemap: Not started\n");
11730	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
11731	chgs = 0;
11732	bmsafemap->sm_state &= ~IOSTARTED;
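	/*
	 * A background write operates on a shadow copy of the cg
	 * buffer; bitmap roll-forwards apply only to the real copy.
	 */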
11733	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
11734	/*
11735	 * Release journal work that was waiting on the write.
11736	 */
11737	handle_jwork(&bmsafemap->sm_freewr);
11738
11739	/*
11740	 * Restore unwritten inode allocation pending jaddref writes.
11741	 */
11742	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
11743		cgp = (struct cg *)bp->b_data;
11744		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11745		inosused = cg_inosused(cgp);
11746		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
11747		    ja_bmdeps, jatmp) {
11748			if ((jaddref->ja_state & UNDONE) == 0)
11749				continue;
11750			ino = jaddref->ja_ino % fs->fs_ipg;
11751			if (isset(inosused, ino))
11752				panic("handle_written_bmsafemap: "
11753				    "re-allocated inode");
11754			/* Do the roll-forward only if it's a real copy. */
11755			if (foreground) {
11756				if ((jaddref->ja_mode & IFMT) == IFDIR)
11757					cgp->cg_cs.cs_ndir++;
11758				cgp->cg_cs.cs_nifree--;
11759				setbit(inosused, ino);
11760				chgs = 1;
11761			}
11762			jaddref->ja_state &= ~UNDONE;
11763			jaddref->ja_state |= ATTACHED;
11764			free_jaddref(jaddref);
11765		}
11766	}
11767	/*
11768	 * Restore any block allocations which are pending journal writes.
11769	 */
11770	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11771		cgp = (struct cg *)bp->b_data;
11772		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11773		blksfree = cg_blksfree(cgp);
11774		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
11775		    jntmp) {
11776			if ((jnewblk->jn_state & UNDONE) == 0)
11777				continue;
11778			/* Do the roll-forward only if it's a real copy. */
11779			if (foreground &&
11780			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
11781				chgs = 1;
11782			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
11783			jnewblk->jn_state |= ATTACHED;
11784			free_jnewblk(jnewblk);
11785		}
11786	}
11787	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
11788		newblk->nb_state |= DEPCOMPLETE;
11789		newblk->nb_state &= ~ONDEPLIST;
11790		newblk->nb_bmsafemap = NULL;
11791		LIST_REMOVE(newblk, nb_deps);
11792		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
11793			handle_allocdirect_partdone(
11794			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
11795		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
11796			handle_allocindir_partdone(
11797			    WK_ALLOCINDIR(&newblk->nb_list));
11798		else if (newblk->nb_list.wk_type != D_NEWBLK)
11799			panic("handle_written_bmsafemap: Unexpected type: %s",
11800			    TYPENAME(newblk->nb_list.wk_type));
11801	}
11802	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
11803		inodedep->id_state |= DEPCOMPLETE;
11804		inodedep->id_state &= ~ONDEPLIST;
11805		LIST_REMOVE(inodedep, id_deps);
11806		inodedep->id_bmsafemap = NULL;
11807	}
11808	LIST_REMOVE(bmsafemap, sm_next);
11809	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
11810	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
11811	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
11812	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
11813	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
11814		LIST_REMOVE(bmsafemap, sm_hash);
11815		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
11816		return (0);
11817	}
11818	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
11819	if (foreground)
11820		bdirty(bp);
11821	return (1);
11822}
11823
11824/*
11825 * Try to free a mkdir dependency.
11826 */
11827static void
11828complete_mkdir(mkdir)
11829	struct mkdir *mkdir;
11830{
11831	struct diradd *dap;
11832
11833	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
11834		return;
11835	LIST_REMOVE(mkdir, md_mkdirs);
11836	dap = mkdir->md_diradd;
11837	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
11838	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
11839		dap->da_state |= DEPCOMPLETE;
11840		complete_diradd(dap);
11841	}
11842	WORKITEM_FREE(mkdir, D_MKDIR);
11843}
11844
11845/*
11846 * Handle the completion of a mkdir dependency.
11847 */
11848static void
11849handle_written_mkdir(mkdir, type)
11850	struct mkdir *mkdir;
11851	int type;
11852{
11853
11854	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
11855		panic("handle_written_mkdir: bad type");
11856	mkdir->md_state |= COMPLETE;
11857	complete_mkdir(mkdir);
11858}
11859
11860static int
11861free_pagedep(pagedep)
11862	struct pagedep *pagedep;
11863{
11864	int i;
11865
11866	if (pagedep->pd_state & NEWBLOCK)
11867		return (0);
11868	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
11869		return (0);
11870	for (i = 0; i < DAHASHSZ; i++)
11871		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
11872			return (0);
11873	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
11874		return (0);
11875	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
11876		return (0);
11877	if (pagedep->pd_state & ONWORKLIST)
11878		WORKLIST_REMOVE(&pagedep->pd_list);
11879	LIST_REMOVE(pagedep, pd_hash);
11880	WORKITEM_FREE(pagedep, D_PAGEDEP);
11881
11882	return (1);
11883}
11884
11885/*
11886 * Called from within softdep_disk_write_complete above.
11887 * A write operation was just completed. Removed inodes can
11888 * now be freed and associated block pointers may be committed.
11889 * Note that this routine is always called from interrupt level
11890 * with further splbio interrupts blocked.
11891 */
11892static int
11893handle_written_filepage(pagedep, bp)
11894	struct pagedep *pagedep;
11895	struct buf *bp;		/* buffer containing the written page */
11896{
11897	struct dirrem *dirrem;
11898	struct diradd *dap, *nextdap;
11899	struct direct *ep;
11900	int i, chgs;
11901
11902	if ((pagedep->pd_state & IOSTARTED) == 0)
11903		panic("handle_written_filepage: not started");
11904	pagedep->pd_state &= ~IOSTARTED;
11905	/*
11906	 * Process any directory removals that have been committed.
11907	 */
11908	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
11909		LIST_REMOVE(dirrem, dm_next);
11910		dirrem->dm_state |= COMPLETE;
11911		dirrem->dm_dirinum = pagedep->pd_ino;
11912		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
11913		    ("handle_written_filepage: Journal entries not written."));
11914		add_to_worklist(&dirrem->dm_list, 0);
11915	}
11916	/*
11917	 * Free any directory additions that have been committed.
11918	 * If it is a newly allocated block, we have to wait until
11919	 * the on-disk directory inode claims the new block.
11920	 */
11921	if ((pagedep->pd_state & NEWBLOCK) == 0)
11922		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
11923			free_diradd(dap, NULL);
11924	/*
11925	 * Uncommitted directory entries must be restored.
11926	 */
11927	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
11928		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
11929		     dap = nextdap) {
11930			nextdap = LIST_NEXT(dap, da_pdlist);
11931			if (dap->da_state & ATTACHED)
11932				panic("handle_written_filepage: attached");
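			/*
			 * Roll the entry forward: the inode number was
			 * rolled back while the page was being written.
			 */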
11933			ep = (struct direct *)
11934			    ((char *)bp->b_data + dap->da_offset);
11935			ep->d_ino = dap->da_newinum;
11936			dap->da_state &= ~UNDONE;
11937			dap->da_state |= ATTACHED;
11938			chgs = 1;
11939			/*
11940			 * If the inode referenced by the directory has
11941			 * been written out, then the dependency can be
11942			 * moved to the pending list.
11943			 */
11944			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
11945				LIST_REMOVE(dap, da_pdlist);
11946				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
11947				    da_pdlist);
11948			}
11949		}
11950	}
11951	/*
11952	 * If there were any rollbacks in the directory, then it must be
11953	 * marked dirty so that it will eventually get written back in
11954	 * its correct form.
11955	 */
11956	if (chgs) {
11957		if ((bp->b_flags & B_DELWRI) == 0)
11958			stat_dir_entry++;
11959		bdirty(bp);
11960		return (1);
11961	}
11962	/*
11963	 * If we are not waiting for a new directory block to be
11964	 * claimed by its inode, then the pagedep will be freed.
11965	 * Otherwise it will remain to track any new entries on
11966	 * the page in case they are fsync'ed.
11967	 */
11968	free_pagedep(pagedep);
11969	return (0);
11970}
11971
11972/*
11973 * Writing back in-core inode structures.
11974 *
11975 * The filesystem only accesses an inode's contents when it occupies an
11976 * "in-core" inode structure.  These "in-core" structures are separate from
11977 * the page frames used to cache inode blocks.  Only the latter are
11978 * transferred to/from the disk.  So, when the updated contents of the
11979 * "in-core" inode structure are copied to the corresponding in-memory inode
11980 * block, the dependencies are also transferred.  The following procedure is
11981 * called when copying a dirty "in-core" inode to a cached inode block.
11982 */
11983
11984/*
11985 * Called when an inode is loaded from disk. If the effective link count
11986 * differed from the actual link count when it was last flushed, then we
11987 * need to ensure that the correct effective link count is put back.
11988 */
11989void
11990softdep_load_inodeblock(ip)
11991	struct inode *ip;	/* the "in_core" copy of the inode */
11992{
11993	struct inodedep *inodedep;
11994
11995	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
11996	    ("softdep_load_inodeblock called on non-softdep filesystem"));
11997	/*
11998	 * Check for alternate nlink count.
11999	 */
12000	ip->i_effnlink = ip->i_nlink;
12001	ACQUIRE_LOCK(ip->i_ump);
12002	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
12003	    &inodedep) == 0) {
12004		FREE_LOCK(ip->i_ump);
12005		return;
12006	}
12007	ip->i_effnlink -= inodedep->id_nlinkdelta;
12008	FREE_LOCK(ip->i_ump);
12009}
12010
12011/*
12012 * This routine is called just before the "in-core" inode
12013 * information is to be copied to the in-memory inode block.
12014 * Recall that an inode block contains several inodes. If
12015 * the force flag is set, then the dependencies will be
12016 * cleared so that the update can always be made. Note that
12017 * the buffer is locked when this routine is called, so we
12018 * will never be in the middle of writing the inode block
12019 * to disk.
12020 */
12021void
12022softdep_update_inodeblock(ip, bp, waitfor)
12023	struct inode *ip;	/* the "in_core" copy of the inode */
12024	struct buf *bp;		/* the buffer containing the inode block */
12025	int waitfor;		/* nonzero => update must be allowed */
12026{
12027	struct inodedep *inodedep;
12028	struct inoref *inoref;
12029	struct ufsmount *ump;
12030	struct worklist *wk;
12031	struct mount *mp;
12032	struct buf *ibp;
12033	struct fs *fs;
12034	int error;
12035
12036	ump = ip->i_ump;
12037	mp = UFSTOVFS(ump);
12038	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
12039	    ("softdep_update_inodeblock called on non-softdep filesystem"));
12040	fs = ip->i_fs;
12041	/*
12042	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
12043	 * does not have access to the in-core ip, so it must write directly into
12044	 * the inode block buffer when setting freelink.
12045	 */
12046	if (fs->fs_magic == FS_UFS1_MAGIC)
12047		DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
12048		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12049	else
12050		DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
12051		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12052	/*
12053	 * If the effective link count is not equal to the actual link
12054	 * count, then we must track the difference in an inodedep while
12055	 * the inode is (potentially) tossed out of the cache. Otherwise,
12056	 * if there is no existing inodedep, then there are no dependencies
12057	 * to track.
12058	 */
12059	ACQUIRE_LOCK(ump);
12060again:
12061	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12062		FREE_LOCK(ump);
12063		if (ip->i_effnlink != ip->i_nlink)
12064			panic("softdep_update_inodeblock: bad link count");
12065		return;
12066	}
12067	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
12068		panic("softdep_update_inodeblock: bad delta");
12069	/*
12070	 * If we're flushing all dependencies, we must also move anything waiting
12071	 * for journal writes onto the bufwait list prior to I/O.
12072	 */
12073	if (waitfor) {
12074		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12075			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12076			    == DEPCOMPLETE) {
12077				jwait(&inoref->if_list, MNT_WAIT);
12078				goto again;
12079			}
12080		}
12081	}
12082	/*
12083	 * Changes have been initiated. Anything depending on these
12084	 * changes cannot occur until this inode has been written.
12085	 */
12086	inodedep->id_state &= ~COMPLETE;
12087	if ((inodedep->id_state & ONWORKLIST) == 0)
12088		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
12089	/*
12090	 * Any new dependencies associated with the incore inode must
12091	 * now be moved to the list associated with the buffer holding
12092	 * the in-memory copy of the inode. Once merged process any
12093	 * allocdirects that are completed by the merger.
12094	 */
12095	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
12096	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
12097		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
12098		    NULL);
12099	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
12100	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
12101		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
12102		    NULL);
12103	/*
12104	 * Now that the inode has been pushed into the buffer, the
12105	 * operations dependent on the inode being written to disk
12106	 * can be moved to the id_bufwait so that they will be
12107	 * processed when the buffer I/O completes.
12108	 */
12109	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
12110		WORKLIST_REMOVE(wk);
12111		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
12112	}
12113	/*
12114	 * Newly allocated inodes cannot be written until the bitmap
12115	 * that allocates them has been written (indicated by
12116	 * DEPCOMPLETE being set in id_state). If we are doing a
12117	 * forced sync (e.g., an fsync on a file), we force the bitmap
12118	 * to be written so that the update can be done.
12119	 */
12120	if (waitfor == 0) {
12121		FREE_LOCK(ump);
12122		return;
12123	}
12124retry:
12125	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
12126		FREE_LOCK(ump);
12127		return;
12128	}
12129	ibp = inodedep->id_bmsafemap->sm_buf;
12130	ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
12131	if (ibp == NULL) {
12132		/*
12133		 * If ibp came back as NULL, the dependency could have been
12134		 * freed while we slept.  Look it up again, and check to see
12135		 * that it has completed.
12136		 */
12137		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
12138			goto retry;
12139		FREE_LOCK(ump);
12140		return;
12141	}
12142	FREE_LOCK(ump);
12143	if ((error = bwrite(ibp)) != 0)
12144		softdep_error("softdep_update_inodeblock: bwrite", error);
12145}
12146
12147/*
12148 * Merge a new inode dependency list (such as id_newinoupdt) into an
12149 * old inode dependency list (such as id_inoupdt). This routine must be
12150 * called with splbio interrupts blocked.
12151 */
12152static void
12153merge_inode_lists(newlisthead, oldlisthead)
12154	struct allocdirectlst *newlisthead;
12155	struct allocdirectlst *oldlisthead;
12156{
12157	struct allocdirect *listadp, *newadp;
12158
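	/*
	 * Both lists are sorted by logical block offset.  Insert each
	 * new entry before the first old entry with an equal or greater
	 * offset, merging the two dependencies when the offsets match.
	 */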
12159	newadp = TAILQ_FIRST(newlisthead);
12160	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
12161		if (listadp->ad_offset < newadp->ad_offset) {
12162			listadp = TAILQ_NEXT(listadp, ad_next);
12163			continue;
12164		}
12165		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12166		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
12167		if (listadp->ad_offset == newadp->ad_offset) {
12168			allocdirect_merge(oldlisthead, newadp,
12169			    listadp);
12170			listadp = newadp;
12171		}
12172		newadp = TAILQ_FIRST(newlisthead);
12173	}
12174	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
12175		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12176		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
12177	}
12178}
12179
12180/*
12181 * If we are doing an fsync, then we must ensure that any directory
12182 * entries for the inode have been written after the inode gets to disk.
12183 */
12184int
12185softdep_fsync(vp)
12186	struct vnode *vp;	/* the "in_core" copy of the inode */
12187{
12188	struct inodedep *inodedep;
12189	struct pagedep *pagedep;
12190	struct inoref *inoref;
12191	struct ufsmount *ump;
12192	struct worklist *wk;
12193	struct diradd *dap;
12194	struct mount *mp;
12195	struct vnode *pvp;
12196	struct inode *ip;
12197	struct buf *bp;
12198	struct fs *fs;
12199	struct thread *td = curthread;
12200	int error, flushparent, pagedep_new_block;
12201	ino_t parentino;
12202	ufs_lbn_t lbn;
12203
12204	ip = VTOI(vp);
12205	fs = ip->i_fs;
12206	ump = ip->i_ump;
12207	mp = vp->v_mount;
12208	if (MOUNTEDSOFTDEP(mp) == 0)
12209		return (0);
12210	ACQUIRE_LOCK(ump);
12211restart:
12212	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12213		FREE_LOCK(ump);
12214		return (0);
12215	}
12216	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12217		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12218		    == DEPCOMPLETE) {
12219			jwait(&inoref->if_list, MNT_WAIT);
12220			goto restart;
12221		}
12222	}
12223	if (!LIST_EMPTY(&inodedep->id_inowait) ||
12224	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12225	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12226	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12227	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12228		panic("softdep_fsync: pending ops %p", inodedep);
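	/*
	 * Walk the directory additions that name this inode, flushing
	 * the parent directory as needed so that each entry reaches
	 * the disk.
	 */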
12229	for (error = 0, flushparent = 0; ; ) {
12230		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12231			break;
12232		if (wk->wk_type != D_DIRADD)
12233			panic("softdep_fsync: Unexpected type %s",
12234			    TYPENAME(wk->wk_type));
12235		dap = WK_DIRADD(wk);
12236		/*
12237		 * Flush our parent if this directory entry has a MKDIR_PARENT
12238		 * dependency or is contained in a newly allocated block.
12239		 */
12240		if (dap->da_state & DIRCHG)
12241			pagedep = dap->da_previous->dm_pagedep;
12242		else
12243			pagedep = dap->da_pagedep;
12244		parentino = pagedep->pd_ino;
12245		lbn = pagedep->pd_lbn;
12246		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12247			panic("softdep_fsync: dirty");
12248		if ((dap->da_state & MKDIR_PARENT) ||
12249		    (pagedep->pd_state & NEWBLOCK))
12250			flushparent = 1;
12251		else
12252			flushparent = 0;
12253		/*
12254		 * If we are being fsync'ed as part of vgone'ing this vnode,
12255		 * then we will not be able to release and recover the
12256		 * vnode below, so we just have to give up on writing its
12257		 * directory entry out. It will eventually be written, just
12258		 * not now, but then the user was not asking to have it
12259		 * written, so we are not breaking any promises.
12260		 */
12261		if (vp->v_iflag & VI_DOOMED)
12262			break;
12263		/*
12264		 * We prevent deadlock by always fetching inodes from the
12265		 * root, moving down the directory tree. Thus, when fetching
12266		 * our parent directory, we first try to get the lock. If
12267		 * that fails, we must unlock ourselves before requesting
12268		 * the lock on our parent. See the comment in ufs_lookup
12269		 * for details on possible races.
12270		 */
12271		FREE_LOCK(ump);
12272		if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
12273		    FFSV_FORCEINSMQ)) {
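			/*
			 * The non-blocking fetch of the parent failed.
			 * Busy the mount so that it cannot be unmounted,
			 * drop our own lock, and retry the parent with a
			 * blocking lock.
			 */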
12274			error = vfs_busy(mp, MBF_NOWAIT);
12275			if (error != 0) {
12276				vfs_ref(mp);
12277				VOP_UNLOCK(vp, 0);
12278				error = vfs_busy(mp, 0);
12279				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12280				vfs_rel(mp);
12281				if (error != 0)
12282					return (ENOENT);
12283				if (vp->v_iflag & VI_DOOMED) {
12284					vfs_unbusy(mp);
12285					return (ENOENT);
12286				}
12287			}
12288			VOP_UNLOCK(vp, 0);
12289			error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
12290			    &pvp, FFSV_FORCEINSMQ);
12291			vfs_unbusy(mp);
12292			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12293			if (vp->v_iflag & VI_DOOMED) {
12294				if (error == 0)
12295					vput(pvp);
12296				error = ENOENT;
12297			}
12298			if (error != 0)
12299				return (error);
12300		}
12301		/*
12302		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12303		 * that are contained in direct blocks will be resolved by
12304		 * doing a ffs_update. Pagedeps contained in indirect blocks
12305		 * may require a complete sync'ing of the directory. So, we
12306		 * try the cheap and fast ffs_update first, and if that fails,
12307		 * then we do the slower ffs_syncvnode of the directory.
12308		 */
12309		if (flushparent) {
12310			int locked;
12311
12312			if ((error = ffs_update(pvp, 1)) != 0) {
12313				vput(pvp);
12314				return (error);
12315			}
12316			ACQUIRE_LOCK(ump);
12317			locked = 1;
12318			if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12319				if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12320					if (wk->wk_type != D_DIRADD)
12321						panic("softdep_fsync: Unexpected type %s",
12322						      TYPENAME(wk->wk_type));
12323					dap = WK_DIRADD(wk);
12324					if (dap->da_state & DIRCHG)
12325						pagedep = dap->da_previous->dm_pagedep;
12326					else
12327						pagedep = dap->da_pagedep;
12328					pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12329					FREE_LOCK(ump);
12330					locked = 0;
12331					if (pagedep_new_block && (error =
12332					    ffs_syncvnode(pvp, MNT_WAIT, 0))) {
12333						vput(pvp);
12334						return (error);
12335					}
12336				}
12337			}
12338			if (locked)
12339				FREE_LOCK(ump);
12340		}
12341		/*
12342		 * Flush directory page containing the inode's name.
12343		 */
12344		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12345		    &bp);
12346		if (error == 0)
12347			error = bwrite(bp);
12348		else
12349			brelse(bp);
12350		vput(pvp);
12351		if (error != 0)
12352			return (error);
12353		ACQUIRE_LOCK(ump);
12354		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12355			break;
12356	}
12357	FREE_LOCK(ump);
12358	return (0);
12359}
12360
12361/*
12362 * Flush all the dirty bitmaps associated with the block device
12363 * before flushing the rest of the dirty blocks so as to reduce
12364 * the number of dependencies that will have to be rolled back.
12365 *
12366 * XXX Unused?
12367 */
12368void
12369softdep_fsync_mountdev(vp)
12370	struct vnode *vp;
12371{
12372	struct buf *bp, *nbp;
12373	struct worklist *wk;
12374	struct bufobj *bo;
12375
12376	if (!vn_isdisk(vp, NULL))
12377		panic("softdep_fsync_mountdev: vnode not a disk");
12378	bo = &vp->v_bufobj;
12379restart:
12380	BO_LOCK(bo);
12381	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12382		/*
12383		 * If it is already scheduled, skip to the next buffer.
12384		 */
12385		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12386			continue;
12387
12388		if ((bp->b_flags & B_DELWRI) == 0)
12389			panic("softdep_fsync_mountdev: not dirty");
12390		/*
12391		 * We are only interested in bitmaps with outstanding
12392		 * dependencies.
12393		 */
12394		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12395		    wk->wk_type != D_BMSAFEMAP ||
12396		    (bp->b_vflags & BV_BKGRDINPROG)) {
12397			BUF_UNLOCK(bp);
12398			continue;
12399		}
12400		BO_UNLOCK(bo);
12401		bremfree(bp);
12402		(void) bawrite(bp);
12403		goto restart;
12404	}
12405	drain_output(vp);
12406	BO_UNLOCK(bo);
12407}
12408
12409/*
12410 * Sync all cylinder groups that were dirty at the time this function is
12411 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
12412 * is used to flush freedep activity that may be holding up writes to a
12413 * is used to flush freedep activity that may be holding up writes to an
12414 */
12415static int
12416sync_cgs(mp, waitfor)
12417	struct mount *mp;
12418	int waitfor;
12419{
12420	struct bmsafemap *bmsafemap;
12421	struct bmsafemap *sentinel;
12422	struct ufsmount *ump;
12423	struct buf *bp;
12424	int error;
12425
12426	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12427	sentinel->sm_cg = -1;
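	/*
	 * The sentinel, marked by an impossible cg number, records our
	 * position in the dirty cg list across lock drops so the scan
	 * can resume where it left off.
	 */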
12428	ump = VFSTOUFS(mp);
12429	error = 0;
12430	ACQUIRE_LOCK(ump);
12431	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12432	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12433	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12434		/* Skip sentinels and cgs with no work to release. */
12435		if (bmsafemap->sm_cg == -1 ||
12436		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12437		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
12438			LIST_REMOVE(sentinel, sm_next);
12439			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12440			continue;
12441		}
12442		/*
12443	 * If we don't get the lock and we're waiting, try again; if
12444	 * not, move on to the next buf and try to sync it.
12445		 */
12446		bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
12447		if (bp == NULL && waitfor == MNT_WAIT)
12448			continue;
12449		LIST_REMOVE(sentinel, sm_next);
12450		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12451		if (bp == NULL)
12452			continue;
12453		FREE_LOCK(ump);
12454		if (waitfor == MNT_NOWAIT)
12455			bawrite(bp);
12456		else
12457			error = bwrite(bp);
12458		ACQUIRE_LOCK(ump);
12459		if (error)
12460			break;
12461	}
12462	LIST_REMOVE(sentinel, sm_next);
12463	FREE_LOCK(ump);
12464	free(sentinel, M_BMSAFEMAP);
12465	return (error);
12466}
12467
12468/*
12469 * This routine is called when we are trying to synchronously flush a
12470 * file. This routine must eliminate any filesystem metadata dependencies
12471 * so that the syncing routine can succeed.
12472 */
12473int
12474softdep_sync_metadata(struct vnode *vp)
12475{
12476	struct inode *ip;
12477	int error;
12478
12479	ip = VTOI(vp);
12480	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
12481	    ("softdep_sync_metadata called on non-softdep filesystem"));
12482	/*
12483	 * Ensure that any direct block dependencies have been cleared,
12484	 * truncations are started, and inode references are journaled.
12485	 */
12486	ACQUIRE_LOCK(ip->i_ump);
12487	/*
12488	 * Write all journal records to prevent rollbacks on devvp.
12489	 */
12490	if (vp->v_type == VCHR)
12491		softdep_flushjournal(vp->v_mount);
12492	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
12493	/*
12494	 * Ensure that all truncates are written so we won't find deps on
12495	 * indirect blocks.
12496	 */
12497	process_truncates(vp);
12498	FREE_LOCK(ip->i_ump);
12499
12500	return (error);
12501}
12502
12503/*
12504 * This routine is called when we are attempting to sync a buf with
12505 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
12506 * other IO it can but returns EBUSY if the buffer is not yet able to
12507 * be written.  Dependencies which will not cause rollbacks will always
12508 * return 0.
12509 */
12510int
12511softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
12512{
12513	struct indirdep *indirdep;
12514	struct pagedep *pagedep;
12515	struct allocindir *aip;
12516	struct newblk *newblk;
12517	struct ufsmount *ump;
12518	struct buf *nbp;
12519	struct worklist *wk;
12520	int i, error;
12521
12522	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12523	    ("softdep_sync_buf called on non-softdep filesystem"));
12524	/*
12525	 * For VCHR we just don't want to force flush any dependencies that
12526	 * will cause rollbacks.
12527	 */
12528	if (vp->v_type == VCHR) {
12529		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12530			return (EBUSY);
12531		return (0);
12532	}
12533	ump = VTOI(vp)->i_ump;
12534	ACQUIRE_LOCK(ump);
12535	/*
12536	 * As we hold the buffer locked, none of its dependencies
12537	 * will disappear.
12538	 */
12539	error = 0;
12540top:
12541	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12542		switch (wk->wk_type) {
12543
12544		case D_ALLOCDIRECT:
12545		case D_ALLOCINDIR:
12546			newblk = WK_NEWBLK(wk);
12547			if (newblk->nb_jnewblk != NULL) {
12548				if (waitfor == MNT_NOWAIT) {
12549					error = EBUSY;
12550					goto out_unlock;
12551				}
12552				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12553				goto top;
12554			}
12555			if (newblk->nb_state & DEPCOMPLETE ||
12556			    waitfor == MNT_NOWAIT)
12557				continue;
12558			nbp = newblk->nb_bmsafemap->sm_buf;
12559			nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12560			if (nbp == NULL)
12561				goto top;
12562			FREE_LOCK(ump);
12563			if ((error = bwrite(nbp)) != 0)
12564				goto out;
12565			ACQUIRE_LOCK(ump);
12566			continue;
12567
12568		case D_INDIRDEP:
12569			indirdep = WK_INDIRDEP(wk);
12570			if (waitfor == MNT_NOWAIT) {
12571				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
12572				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
12573					error = EBUSY;
12574					goto out_unlock;
12575				}
12576			}
12577			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
12578				panic("softdep_sync_buf: truncation pending.");
12579		restart:
12580			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
12581				newblk = (struct newblk *)aip;
12582				if (newblk->nb_jnewblk != NULL) {
12583					jwait(&newblk->nb_jnewblk->jn_list,
12584					    waitfor);
12585					goto restart;
12586				}
12587				if (newblk->nb_state & DEPCOMPLETE)
12588					continue;
12589				nbp = newblk->nb_bmsafemap->sm_buf;
12590				nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12591				if (nbp == NULL)
12592					goto restart;
12593				FREE_LOCK(ump);
12594				if ((error = bwrite(nbp)) != 0)
12595					goto out;
12596				ACQUIRE_LOCK(ump);
12597				goto restart;
12598			}
12599			continue;
12600
12601		case D_PAGEDEP:
12602			/*
12603			 * Only flush directory entries in synchronous passes.
12604			 */
12605			if (waitfor != MNT_WAIT) {
12606				error = EBUSY;
12607				goto out_unlock;
12608			}
12609			/*
12610			 * While syncing snapshots, we must allow recursive
12611			 * lookups.
12612			 */
12613			BUF_AREC(bp);
12614			/*
12615			 * We are trying to sync a directory that may
12616			 * have dependencies on both its own metadata
12617			 * and/or dependencies on the inodes of any
12618			 * recently allocated files. We walk its diradd
12619			 * lists pushing out the associated inode.
12620			 */
12621			pagedep = WK_PAGEDEP(wk);
12622			for (i = 0; i < DAHASHSZ; i++) {
12623				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL)
12624					continue;
12625				if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12626				    &pagedep->pd_diraddhd[i]))) {
12627					BUF_NOREC(bp);
12628					goto out_unlock;
12629				}
12630			}
12631			BUF_NOREC(bp);
12632			continue;
12633
12634		case D_FREEWORK:
12635		case D_FREEDEP:
12636		case D_JSEGDEP:
12637		case D_JNEWBLK:
12638			continue;
12639
12640		default:
12641			panic("softdep_sync_buf: Unknown type %s",
12642			    TYPENAME(wk->wk_type));
12643			/* NOTREACHED */
12644		}
12645	}
12646out_unlock:
12647	FREE_LOCK(ump);
12648out:
12649	return (error);
12650}
12651
12652/*
12653 * Flush the dependencies associated with an inodedep.
12654 * Called with splbio blocked.
12655 */
12656static int
12657flush_inodedep_deps(vp, mp, ino)
12658	struct vnode *vp;
12659	struct mount *mp;
12660	ino_t ino;
12661{
12662	struct inodedep *inodedep;
12663	struct inoref *inoref;
12664	struct ufsmount *ump;
12665	int error, waitfor;
12666
12667	/*
12668	 * This work is done in two passes. The first pass grabs most
12669	 * of the buffers and begins asynchronously writing them. The
12670	 * only way to wait for these asynchronous writes is to sleep
12671	 * on the filesystem vnode which may stay busy for a long time
12672	 * if the filesystem is active. So, instead, we make a second
12673	 * pass over the dependencies blocking on each write. In the
12674	 * usual case we will be blocking against a write that we
12675	 * initiated, so when it is done the dependency will have been
12676	 * resolved. Thus the second pass is expected to end quickly.
12677	 * We give a brief window at the top of the loop to allow
12678	 * any pending I/O to complete.
12679	 */
12680	ump = VFSTOUFS(mp);
12681	LOCK_OWNED(ump);
12682	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12683		if (error)
12684			return (error);
12685		FREE_LOCK(ump);
12686		ACQUIRE_LOCK(ump);
12687restart:
12688		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12689			return (0);
12690		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12691			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12692			    == DEPCOMPLETE) {
12693				jwait(&inoref->if_list, MNT_WAIT);
12694				goto restart;
12695			}
12696		}
12697		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12698		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12699		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12700		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12701			continue;
12702		/*
12703		 * If this was pass 2, we are done; otherwise do pass 2.
12704		 */
12705		if (waitfor == MNT_WAIT)
12706			break;
12707		waitfor = MNT_WAIT;
12708	}
12709	/*
12710	 * Try freeing inodedep in case all dependencies have been removed.
12711	 */
12712	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12713		(void) free_inodedep(inodedep);
12714	return (0);
12715}
12716
12717/*
12718 * Flush an inode dependency list.
12719 * Called with splbio blocked.
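 * Returns non-zero if it slept or initiated I/O, signalling the caller
 * to rescan the list; zero otherwise.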
12720 */
12721static int
12722flush_deplist(listhead, waitfor, errorp)
12723	struct allocdirectlst *listhead;
12724	int waitfor;
12725	int *errorp;
12726{
12727	struct allocdirect *adp;
12728	struct newblk *newblk;
12729	struct ufsmount *ump;
12730	struct buf *bp;
12731
12732	if ((adp = TAILQ_FIRST(listhead)) == NULL)
12733		return (0);
12734	ump = VFSTOUFS(adp->ad_list.wk_mp);
12735	LOCK_OWNED(ump);
12736	TAILQ_FOREACH(adp, listhead, ad_next) {
12737		newblk = (struct newblk *)adp;
12738		if (newblk->nb_jnewblk != NULL) {
12739			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12740			return (1);
12741		}
12742		if (newblk->nb_state & DEPCOMPLETE)
12743			continue;
12744		bp = newblk->nb_bmsafemap->sm_buf;
12745		bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor);
12746		if (bp == NULL) {
12747			if (waitfor == MNT_NOWAIT)
12748				continue;
12749			return (1);
12750		}
12751		FREE_LOCK(ump);
12752		if (waitfor == MNT_NOWAIT)
12753			bawrite(bp);
12754		else
12755			*errorp = bwrite(bp);
12756		ACQUIRE_LOCK(ump);
12757		return (1);
12758	}
12759	return (0);
12760}
12761
12762/*
12763 * Flush dependencies associated with an allocdirect block.
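 * The journal entry, the bitmap, the buffer itself, and finally the
 * inode are each pushed to disk in turn until no dependency remains.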
12764 */
12765static int
12766flush_newblk_dep(vp, mp, lbn)
12767	struct vnode *vp;
12768	struct mount *mp;
12769	ufs_lbn_t lbn;
12770{
12771	struct newblk *newblk;
12772	struct ufsmount *ump;
12773	struct bufobj *bo;
12774	struct inode *ip;
12775	struct buf *bp;
12776	ufs2_daddr_t blkno;
12777	int error;
12778
12779	error = 0;
12780	bo = &vp->v_bufobj;
12781	ip = VTOI(vp);
12782	blkno = DIP(ip, i_db[lbn]);
12783	if (blkno == 0)
12784		panic("flush_newblk_dep: Missing block");
12785	ump = VFSTOUFS(mp);
12786	ACQUIRE_LOCK(ump);
12787	/*
12788	 * Loop until all dependencies related to this block are satisfied.
12789	 * We must be careful to restart after each sleep in case a write
12790	 * completes some part of this process for us.
12791	 */
12792	for (;;) {
12793		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
12794			FREE_LOCK(ump);
12795			break;
12796		}
12797		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
12798			panic("flush_newblk_deps: Bad newblk %p", newblk);
12799		/*
12800		 * Flush the journal.
12801		 */
12802		if (newblk->nb_jnewblk != NULL) {
12803			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12804			continue;
12805		}
12806		/*
12807		 * Write the bitmap dependency.
12808		 */
12809		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
12810			bp = newblk->nb_bmsafemap->sm_buf;
12811			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12812			if (bp == NULL)
12813				continue;
12814			FREE_LOCK(ump);
12815			error = bwrite(bp);
12816			if (error)
12817				break;
12818			ACQUIRE_LOCK(ump);
12819			continue;
12820		}
12821		/*
12822		 * Write the buffer.
12823		 */
12824		FREE_LOCK(ump);
12825		BO_LOCK(bo);
12826		bp = gbincore(bo, lbn);
12827		if (bp != NULL) {
12828			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
12829			    LK_INTERLOCK, BO_LOCKPTR(bo));
12830			if (error == ENOLCK) {
12831				ACQUIRE_LOCK(ump);
12832				continue; /* Slept, retry */
12833			}
12834			if (error != 0)
12835				break;	/* Failed */
12836			if (bp->b_flags & B_DELWRI) {
12837				bremfree(bp);
12838				error = bwrite(bp);
12839				if (error)
12840					break;
12841			} else
12842				BUF_UNLOCK(bp);
12843		} else
12844			BO_UNLOCK(bo);
12845		/*
12846		 * We have to wait for the direct pointers to
12847		 * point at the newdirblk before the dependency
12848		 * will go away.
12849		 */
12850		error = ffs_update(vp, 1);
12851		if (error)
12852			break;
12853		ACQUIRE_LOCK(ump);
12854	}
12855	return (error);
12856}
12857
12858/*
12859 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
12860 * Called with splbio blocked.
12861 */
12862static int
12863flush_pagedep_deps(pvp, mp, diraddhdp)
12864	struct vnode *pvp;
12865	struct mount *mp;
12866	struct diraddhd *diraddhdp;
12867{
12868	struct inodedep *inodedep;
12869	struct inoref *inoref;
12870	struct ufsmount *ump;
12871	struct diradd *dap;
12872	struct vnode *vp;
12873	int error = 0;
12874	struct buf *bp;
12875	ino_t inum;
12876	struct diraddhd unfinished;
12877
12878	LIST_INIT(&unfinished);
12879	ump = VFSTOUFS(mp);
12880	LOCK_OWNED(ump);
12881restart:
12882	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12883		/*
12884		 * Flush ourselves if this directory entry
12885		 * has a MKDIR_PARENT dependency.
12886		 */
12887		if (dap->da_state & MKDIR_PARENT) {
12888			FREE_LOCK(ump);
12889			if ((error = ffs_update(pvp, 1)) != 0)
12890				break;
12891			ACQUIRE_LOCK(ump);
12892			/*
12893			 * If that cleared dependencies, go on to next.
12894			 */
12895			if (dap != LIST_FIRST(diraddhdp))
12896				continue;
12897			/*
12898			 * All MKDIR_PARENT dependencies and all the
12899			 * NEWBLOCK pagedeps that are contained in direct
12900			 * blocks were resolved by the ffs_update above.
12901			 * Pagedeps contained in indirect blocks may
12902			 * require a complete sync'ing of the directory.
12903			 * We are in the midst of doing a complete sync,
12904			 * so if they are not resolved in this pass we
12905			 * defer them for now as they will be sync'ed by
12906			 * our caller shortly.
12907			 */
12908			LIST_REMOVE(dap, da_pdlist);
12909			LIST_INSERT_HEAD(&unfinished, dap, da_pdlist);
12910			continue;
12911		}
12912		/*
12913		 * A newly allocated directory must have its "." and
12914		 * ".." entries written out before its name can be
12915		 * committed in its parent.
12916		 */
12917		inum = dap->da_newinum;
12918		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12919			panic("flush_pagedep_deps: lost inode1");
12920		/*
12921		 * Wait for any pending journal adds to complete so we don't
12922		 * cause rollbacks while syncing.
12923		 */
12924		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12925			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12926			    == DEPCOMPLETE) {
12927				jwait(&inoref->if_list, MNT_WAIT);
12928				goto restart;
12929			}
12930		}
12931		if (dap->da_state & MKDIR_BODY) {
12932			FREE_LOCK(ump);
12933			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12934			    FFSV_FORCEINSMQ)))
12935				break;
12936			error = flush_newblk_dep(vp, mp, 0);
12937			/*
12938			 * If we still have the dependency we might need to
12939			 * update the vnode to sync the new link count to
12940			 * disk.
12941			 */
12942			if (error == 0 && dap == LIST_FIRST(diraddhdp))
12943				error = ffs_update(vp, 1);
12944			vput(vp);
12945			if (error != 0)
12946				break;
12947			ACQUIRE_LOCK(ump);
12948			/*
12949			 * If that cleared dependencies, go on to next.
12950			 */
12951			if (dap != LIST_FIRST(diraddhdp))
12952				continue;
12953			if (dap->da_state & MKDIR_BODY) {
12954				inodedep_lookup(UFSTOVFS(ump), inum, 0,
12955				    &inodedep);
12956				panic("flush_pagedep_deps: MKDIR_BODY "
12957				    "inodedep %p dap %p vp %p",
12958				    inodedep, dap, vp);
12959			}
12960		}
12961		/*
12962		 * Flush the inode on which the directory entry depends.
12963		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12964		 * the only remaining dependency is that the updated inode
12965		 * count must get pushed to disk. The inode has already
12966		 * been pushed into its inode buffer (via VOP_UPDATE) at
12967		 * the time of the reference count change. So we need only
12968		 * locate that buffer, ensure that there will be no rollback
12969		 * caused by a bitmap dependency, then write the inode buffer.
12970		 */
12971retry:
12972		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12973			panic("flush_pagedep_deps: lost inode");
12974		/*
12975		 * If the inode still has bitmap dependencies,
12976		 * push them to disk.
12977		 */
12978		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12979			bp = inodedep->id_bmsafemap->sm_buf;
12980			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12981			if (bp == NULL)
12982				goto retry;
12983			FREE_LOCK(ump);
12984			if ((error = bwrite(bp)) != 0)
12985				break;
12986			ACQUIRE_LOCK(ump);
12987			if (dap != LIST_FIRST(diraddhdp))
12988				continue;
12989		}
12990		/*
12991		 * If the inode is still sitting in a buffer waiting
12992		 * to be written or waiting for the link count to be
12993		 * adjusted, update it here to flush it to disk.
12994		 */
12995		if (dap == LIST_FIRST(diraddhdp)) {
12996			FREE_LOCK(ump);
12997			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12998			    FFSV_FORCEINSMQ)))
12999				break;
13000			error = ffs_update(vp, 1);
13001			vput(vp);
13002			if (error)
13003				break;
13004			ACQUIRE_LOCK(ump);
13005		}
13006		/*
13007		 * If we have failed to get rid of all the dependencies
13008		 * then something is seriously wrong.
13009		 */
13010		if (dap == LIST_FIRST(diraddhdp)) {
13011			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13012			panic("flush_pagedep_deps: failed to flush "
13013			    "inodedep %p ino %ju dap %p",
13014			    inodedep, (uintmax_t)inum, dap);
13015		}
13016	}
13017	if (error)
13018		ACQUIRE_LOCK(ump);
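	/*
	 * Put any deferred MKDIR_PARENT entries back on the caller's
	 * list; the full directory sync done by our caller will
	 * resolve them.
	 */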
13019	while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13020		LIST_REMOVE(dap, da_pdlist);
13021		LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13022	}
13023	return (error);
13024}
13025
13026/*
13027 * A large burst of file addition or deletion activity can drive the
13028 * memory load excessively high. First attempt to slow things down
13029 * using the techniques below. If that fails, this routine requests
13030 * the offending operations to fall back to running synchronously
13031 * until the memory load returns to a reasonable level.
13032 */
13033int
13034softdep_slowdown(vp)
13035	struct vnode *vp;
13036{
13037	struct ufsmount *ump;
13038	int jlow;
13039	int max_softdeps_hard;
13040
13041	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13042	    ("softdep_slowdown called on non-softdep filesystem"));
13043	ump = VFSTOUFS(vp->v_mount);
13044	ACQUIRE_LOCK(ump);
13045	jlow = 0;
13046	/*
13047	 * Check for journal space if needed.
13048	 */
13049	if (DOINGSUJ(vp)) {
13050		if (journal_space(ump, 0) == 0)
13051			jlow = 1;
13052	}
13053	/*
13054	 * If the system is under its limits and our filesystem is
13055	 * not responsible for more than our share of the usage and
13056	 * we are not low on journal space, then no need to slow down.
13057	 */
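	/* Allow 10% headroom over max_softdeps before throttling. */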
13058	max_softdeps_hard = max_softdeps * 11 / 10;
13059	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13060	    dep_current[D_INODEDEP] < max_softdeps_hard &&
13061	    dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13062	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13063	    ump->softdep_curdeps[D_DIRREM] <
13064	    (max_softdeps_hard / 2) / stat_flush_threads &&
13065	    ump->softdep_curdeps[D_INODEDEP] <
13066	    max_softdeps_hard / stat_flush_threads &&
13067	    ump->softdep_curdeps[D_INDIRDEP] <
13068	    (max_softdeps_hard / 1000) / stat_flush_threads &&
13069	    ump->softdep_curdeps[D_FREEBLKS] <
13070	    max_softdeps_hard / stat_flush_threads) {
13071		FREE_LOCK(ump);
13072		return (0);
13073	}
13074	/*
13075	 * If the journal is low or our filesystem is over its limit
13076	 * then speedup the cleanup.
13077	 */
13078	if (ump->softdep_curdeps[D_INDIRDEP] <
13079	    (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13080		softdep_speedup(ump);
13081	stat_sync_limit_hit += 1;
13082	FREE_LOCK(ump);
13083	/*
13084	 * We only slow down the rate at which new dependencies are
13085	 * generated if we are not using journaling. With journaling,
13086	 * the cleanup should always be sufficient to keep things
13087	 * under control.
13088	 */
13089	if (DOINGSUJ(vp))
13090		return (0);
13091	return (1);
13092}
13093
13094/*
13095 * Called by the allocation routines when they are about to fail
13096 * in the hope that we can free up the requested resource (inodes
13097 * or disk space).
13098 *
13099 * First check to see if the work list has anything on it. If it has,
13100 * clean up entries until we successfully free the requested resource.
13101 * Because this process holds inodes locked, we cannot handle any remove
13102 * requests that might block on a locked inode as that could lead to
13103 * deadlock. If the worklist yields none of the requested resource,
13104 * start syncing out vnodes to free up the needed space.
13105 */
13106int
13107softdep_request_cleanup(fs, vp, cred, resource)
13108	struct fs *fs;
13109	struct vnode *vp;
13110	struct ucred *cred;
13111	int resource;
13112{
13113	struct ufsmount *ump;
13114	struct mount *mp;
13115	struct vnode *lvp, *mvp;
13116	long starttime;
13117	ufs2_daddr_t needed;
13118	int error;
13119
13120	/*
13121	 * If we are being called because of a process doing a
13122	 * copy-on-write, then it is not safe to process any
13123	 * worklist items as we will recurse into the copyonwrite
13124	 * routine.  This will result in an incoherent snapshot.
13125	 * If the vnode that we hold is a snapshot, we must avoid
13126	 * handling other resources that could cause deadlock.
13127	 */
13128	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13129		return (0);
13130
13131	if (resource == FLUSH_BLOCKS_WAIT)
13132		stat_cleanup_blkrequests += 1;
13133	else
13134		stat_cleanup_inorequests += 1;
13135
13136	mp = vp->v_mount;
13137	ump = VFSTOUFS(mp);
13138	mtx_assert(UFS_MTX(ump), MA_OWNED);
13139	UFS_UNLOCK(ump);
13140	error = ffs_update(vp, 1);
13141	if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13142		UFS_LOCK(ump);
13143		return (0);
13144	}
13145	/*
13146	 * If we are in need of resources, start by cleaning up
13147	 * any block removals associated with our inode.
13148	 */
13149	ACQUIRE_LOCK(ump);
13150	process_removes(vp);
13151	process_truncates(vp);
13152	FREE_LOCK(ump);
13153	/*
13154	 * Now clean up at least as many resources as we will need.
13155	 *
13156	 * When requested to clean up inodes, the number that are needed
13157	 * is set by the number of simultaneous writers (mnt_writeopcount)
13158	 * plus a bit of slop (2) in case some more writers show up while
13159	 * we are cleaning.
13160	 *
13161	 * When requested to free up space, the amount of space that
13162	 * we need is enough blocks to allocate a full-sized segment
13163	 * (fs_contigsumsize). The number of such segments that will
13164	 * be needed is set by the number of simultaneous writers
13165	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13166	 * writers show up while we are cleaning.
13167	 *
13168	 * Additionally, if we are unprivileged and allocating space,
13169	 * we need to ensure that we clean up enough blocks to get the
13170	 * needed number of blocks over the threshold of the minimum
13171	 * number of blocks required to be kept free by the filesystem
13172	 * (fs_minfree).
13173	 */
13174	if (resource == FLUSH_INODES_WAIT) {
13175		needed = vp->v_mount->mnt_writeopcount + 2;
13176	} else if (resource == FLUSH_BLOCKS_WAIT) {
13177		needed = (vp->v_mount->mnt_writeopcount + 2) *
13178		    fs->fs_contigsumsize;
13179		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
13180			needed += fragstoblks(fs,
13181			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13182			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
13183	} else {
13184		UFS_LOCK(ump);
13185		printf("softdep_request_cleanup: Unknown resource type %d\n",
13186		    resource);
13187		return (0);
13188	}
13189	starttime = time_second;
13190retry:
13191	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13192	    fs->fs_cstotal.cs_nbfree <= needed) ||
13193	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13194	    fs->fs_cstotal.cs_nifree <= needed)) {
13195		ACQUIRE_LOCK(ump);
13196		if (ump->softdep_on_worklist > 0 &&
13197		    process_worklist_item(UFSTOVFS(ump),
13198		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
13199			stat_worklist_push += 1;
13200		FREE_LOCK(ump);
13201	}
13202	/*
13203	 * If we still need resources and there are no more worklist
13204	 * entries to process to obtain them, we have to start flushing
13205	 * the dirty vnodes to force the release of additional requests
13206	 * to the worklist that we can then process to reap additional
13207	 * resources. We walk the vnodes associated with the mount point
13208	 * until we get the needed worklist requests that we can reap.
13209	 */
13210	if ((resource == FLUSH_BLOCKS_WAIT &&
13211	     fs->fs_cstotal.cs_nbfree <= needed) ||
13212	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13213	     fs->fs_cstotal.cs_nifree <= needed)) {
13214		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13215			if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == NULL) {
13216				VI_UNLOCK(lvp);
13217				continue;
13218			}
13219			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
13220			    curthread))
13221				continue;
13222			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
13223				vput(lvp);
13224				continue;
13225			}
13226			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13227			vput(lvp);
13228		}
13229		lvp = ump->um_devvp;
13230		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13231			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
13232			VOP_UNLOCK(lvp, 0);
13233		}
13234		if (ump->softdep_on_worklist > 0) {
13235			stat_cleanup_retries += 1;
13236			goto retry;
13237		}
13238		stat_cleanup_failures += 1;
13239	}
13240	if (time_second - starttime > stat_cleanup_high_delay)
13241		stat_cleanup_high_delay = time_second - starttime;
13242	UFS_LOCK(ump);
13243	return (1);
13244}
13245
13246static bool
13247softdep_excess_inodes(struct ufsmount *ump)
13248{
13249
13250	return (dep_current[D_INODEDEP] > max_softdeps &&
13251	    ump->softdep_curdeps[D_INODEDEP] > max_softdeps /
13252	    stat_flush_threads);
13253}
13254
13255static bool
13256softdep_excess_dirrem(struct ufsmount *ump)
13257{
13258
13259	return (dep_current[D_DIRREM] > max_softdeps / 2 &&
13260	    ump->softdep_curdeps[D_DIRREM] > (max_softdeps / 2) /
13261	    stat_flush_threads);
13262}
13263
13264static void
13265schedule_cleanup(struct mount *mp)
13266{
13267	struct ufsmount *ump;
13268	struct thread *td;
13269
13270	ump = VFSTOUFS(mp);
13271	LOCK_OWNED(ump);
13272	FREE_LOCK(ump);
13273	td = curthread;
13274	if ((td->td_pflags & TDP_KTHREAD) != 0 &&
13275	    (td->td_proc->p_flag2 & P2_AST_SU) == 0) {
13276		/*
13277		 * No AST is delivered to kernel threads, so nobody
13278		 * would dereference the mp.  Some kernel threads
13279		 * explicitly check for AST, e.g. the NFS daemon does
13280		 * this in its serving loop.
13281		 */
13282		return;
13283	}
13284	if (td->td_su != NULL)
13285		vfs_rel(td->td_su);
13286	vfs_ref(mp);
13287	td->td_su = mp;
13288	thread_lock(td);
13289	td->td_flags |= TDF_ASTPENDING;
13290	thread_unlock(td);
13291}
13292
13293static void
13294softdep_ast_cleanup_proc(void)
13295{
13296	struct thread *td;
13297	struct mount *mp;
13298	struct ufsmount *ump;
13299	int error;
13300	bool req;
13301
13302	td = curthread;
13303	mp = td->td_su;
13304	if (mp == NULL)
13305		return;
13306	td->td_su = NULL;
13307	error = vfs_busy(mp, MBF_NOWAIT);
13308	vfs_rel(mp);
13309	if (error != 0)
13310		return;
13311	if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) {
13312		ump = VFSTOUFS(mp);
13313		for (;;) {
13314			req = false;
13315			ACQUIRE_LOCK(ump);
13316			if (softdep_excess_inodes(ump)) {
13317				req = true;
13318				request_cleanup(mp, FLUSH_INODES);
13319			}
13320			if (softdep_excess_dirrem(ump)) {
13321				req = true;
13322				request_cleanup(mp, FLUSH_BLOCKS);
13323			}
13324			FREE_LOCK(ump);
13325			if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
13326				break;
13327		}
13328	}
13329	vfs_unbusy(mp);
13330}
13331
13332/*
13333 * If memory utilization has gotten too high, deliberately slow things
13334 * down and speed up the I/O processing.
13335 */
13336static int
13337request_cleanup(mp, resource)
13338	struct mount *mp;
13339	int resource;
13340{
13341	struct thread *td = curthread;
13342	struct ufsmount *ump;
13343
13344	ump = VFSTOUFS(mp);
13345	LOCK_OWNED(ump);
13346	/*
13347	 * We never hold up the filesystem syncer or buf daemon.
13348	 */
13349	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
13350		return (0);
13351	/*
13352	 * First check to see if the work list has gotten backlogged.
13353	 * If it has, co-opt this process to help clean up two entries.
13354	 * Because this process may hold inodes locked, we cannot
13355	 * handle any remove requests that might block on a locked
13356	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
13357	 * to avoid recursively processing the worklist.
13358	 */
13359	if (ump->softdep_on_worklist > max_softdeps / 10) {
13360		td->td_pflags |= TDP_SOFTDEP;
13361		process_worklist_item(mp, 2, LK_NOWAIT);
13362		td->td_pflags &= ~TDP_SOFTDEP;
13363		stat_worklist_push += 2;
13364		return (1);
13365	}
13366	/*
13367	 * Next, we attempt to speed up the syncer process. If that
13368	 * is successful, then we allow the process to continue.
13369	 */
13370	if (softdep_speedup(ump) &&
13371	    resource != FLUSH_BLOCKS_WAIT &&
13372	    resource != FLUSH_INODES_WAIT)
13373		return (0);
13374	/*
13375	 * If we are resource constrained on inode dependencies, try
13376	 * flushing some dirty inodes. Otherwise, we are constrained
13377	 * by file deletions, so try accelerating flushes of directories
13378	 * with removal dependencies. We would like to do the cleanup
13379	 * here, but we probably hold an inode locked at this point and
13380	 * that might deadlock against one that we try to clean. So,
13381	 * the best that we can do is request the syncer daemon to do
13382	 * the cleanup for us.
13383	 */
13384	switch (resource) {
13385
13386	case FLUSH_INODES:
13387	case FLUSH_INODES_WAIT:
13388		ACQUIRE_GBLLOCK(&lk);
13389		stat_ino_limit_push += 1;
13390		req_clear_inodedeps += 1;
13391		FREE_GBLLOCK(&lk);
13392		stat_countp = &stat_ino_limit_hit;
13393		break;
13394
13395	case FLUSH_BLOCKS:
13396	case FLUSH_BLOCKS_WAIT:
13397		ACQUIRE_GBLLOCK(&lk);
13398		stat_blk_limit_push += 1;
13399		req_clear_remove += 1;
13400		FREE_GBLLOCK(&lk);
13401		stat_countp = &stat_blk_limit_hit;
13402		break;
13403
13404	default:
13405		panic("request_cleanup: unknown type");
13406	}
13407	/*
13408	 * Hopefully the syncer daemon will catch up and awaken us.
13409	 * We wait at most tickdelay before proceeding in any case.
13410	 */
13411	ACQUIRE_GBLLOCK(&lk);
13412	FREE_LOCK(ump);
13413	proc_waiting += 1;
13414	if (callout_pending(&softdep_callout) == FALSE)
13415		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13416		    pause_timer, 0);
13417
13418	if ((td->td_pflags & TDP_KTHREAD) == 0)
13419		msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13420	proc_waiting -= 1;
13421	FREE_GBLLOCK(&lk);
13422	ACQUIRE_LOCK(ump);
13423	return (1);
13424}
13425
13426/*
13427 * Awaken processes pausing in request_cleanup and clear proc_waiting
13428 * to indicate that there is no longer a timer running. Pause_timer
13429 * will be called with the global softdep mutex (&lk) locked.
13430 */
13431static void
13432pause_timer(arg)
13433	void *arg;
13434{
13435
13436	GBLLOCK_OWNED(&lk);
13437	/*
13438	 * The callout_ API has acquired the mutex and will hold it around this
13439	 * function call.
13440	 */
13441	*stat_countp += proc_waiting;
13442	wakeup(&proc_waiting);
13443}
13444
13445/*
13446 * If requested, try removing inode or removal dependencies.
13447 */
13448static void
13449check_clear_deps(mp)
13450	struct mount *mp;
13451{
13452
13453	/*
13454	 * If we are suspended, it may be because of our using
13455	 * too many inodedeps, so help clear them out.
13456	 */
13457	if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended)
13458		clear_inodedeps(mp);
13459	/*
13460	 * General requests for cleanup of backed up dependencies
13461	 */
13462	ACQUIRE_GBLLOCK(&lk);
13463	if (req_clear_inodedeps) {
13464		req_clear_inodedeps -= 1;
13465		FREE_GBLLOCK(&lk);
13466		clear_inodedeps(mp);
13467		ACQUIRE_GBLLOCK(&lk);
13468		wakeup(&proc_waiting);
13469	}
13470	if (req_clear_remove) {
13471		req_clear_remove -= 1;
13472		FREE_GBLLOCK(&lk);
13473		clear_remove(mp);
13474		ACQUIRE_GBLLOCK(&lk);
13475		wakeup(&proc_waiting);
13476	}
13477	FREE_GBLLOCK(&lk);
13478}
13479
13480/*
13481 * Flush out a directory with at least one removal dependency in an effort to
13482 * reduce the number of dirrem, freefile, and freeblks dependency structures.
13483 */
13484static void
13485clear_remove(mp)
13486	struct mount *mp;
13487{
13488	struct pagedep_hashhead *pagedephd;
13489	struct pagedep *pagedep;
13490	struct ufsmount *ump;
13491	struct vnode *vp;
13492	struct bufobj *bo;
13493	int error, cnt;
13494	ino_t ino;
13495
13496	ump = VFSTOUFS(mp);
13497	LOCK_OWNED(ump);
13498
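	/*
	 * Scan the pagedep hash table round-robin, starting where the
	 * previous call left off, until a directory with a removal
	 * dependency is found.
	 */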
13499	for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) {
13500		pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++];
13501		if (ump->pagedep_nextclean > ump->pagedep_hash_size)
13502			ump->pagedep_nextclean = 0;
13503		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
13504			if (LIST_EMPTY(&pagedep->pd_dirremhd))
13505				continue;
13506			ino = pagedep->pd_ino;
13507			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13508				continue;
13509			FREE_LOCK(ump);
13510
13511			/*
13512			 * Let unmount clear deps
13513			 */
13514			error = vfs_busy(mp, MBF_NOWAIT);
13515			if (error != 0)
13516				goto finish_write;
13517			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13518			     FFSV_FORCEINSMQ);
13519			vfs_unbusy(mp);
13520			if (error != 0) {
13521				softdep_error("clear_remove: vget", error);
13522				goto finish_write;
13523			}
13524			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13525				softdep_error("clear_remove: fsync", error);
13526			bo = &vp->v_bufobj;
13527			BO_LOCK(bo);
13528			drain_output(vp);
13529			BO_UNLOCK(bo);
13530			vput(vp);
13531		finish_write:
13532			vn_finished_write(mp);
13533			ACQUIRE_LOCK(ump);
13534			return;
13535		}
13536	}
13537}
13538
13539/*
13540 * Clear out a block of dirty inodes in an effort to reduce
13541 * the number of inodedep dependency structures.
13542 */
13543static void
13544clear_inodedeps(mp)
13545	struct mount *mp;
13546{
13547	struct inodedep_hashhead *inodedephd;
13548	struct inodedep *inodedep;
13549	struct ufsmount *ump;
13550	struct vnode *vp;
13551	struct fs *fs;
13552	int error, cnt;
13553	ino_t firstino, lastino, ino;
13554
13555	ump = VFSTOUFS(mp);
13556	fs = ump->um_fs;
13557	LOCK_OWNED(ump);
13558	/*
13559	 * Pick a random inode dependency to be cleared.
13560	 * We will then gather up all the inodes in its block
13561	 * that have dependencies and flush them out.
13562	 */
13563	for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) {
13564		inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++];
13565		if (ump->inodedep_nextclean > ump->inodedep_hash_size)
13566			ump->inodedep_nextclean = 0;
13567		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
13568			break;
13569	}
13570	if (inodedep == NULL)
13571		return;
13572	/*
13573	 * Find the last inode in the block with dependencies.
13574	 */
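	/*
	 * INOPB(fs) is a power of two, so the mask below rounds the
	 * inode number down to the first inode of its block.
	 */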
13575	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
13576	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
13577		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
13578			break;
13579	/*
13580	 * Asynchronously push all but the last inode with dependencies.
13581	 * Synchronously push the last inode with dependencies to ensure
13582	 * that the inode block gets written to free up the inodedeps.
13583	 */
13584	for (ino = firstino; ino <= lastino; ino++) {
13585		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13586			continue;
13587		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13588			continue;
13589		FREE_LOCK(ump);
13590		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
13591		if (error != 0) {
13592			vn_finished_write(mp);
13593			ACQUIRE_LOCK(ump);
13594			return;
13595		}
13596		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13597		    FFSV_FORCEINSMQ)) != 0) {
13598			softdep_error("clear_inodedeps: vget", error);
13599			vfs_unbusy(mp);
13600			vn_finished_write(mp);
13601			ACQUIRE_LOCK(ump);
13602			return;
13603		}
13604		vfs_unbusy(mp);
13605		if (ino == lastino) {
13606			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
13607				softdep_error("clear_inodedeps: fsync1", error);
13608		} else {
13609			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13610				softdep_error("clear_inodedeps: fsync2", error);
13611			BO_LOCK(&vp->v_bufobj);
13612			drain_output(vp);
13613			BO_UNLOCK(&vp->v_bufobj);
13614		}
13615		vput(vp);
13616		vn_finished_write(mp);
13617		ACQUIRE_LOCK(ump);
13618	}
13619}
13620
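/*
 * Move the work items on the specified list to the dependency list of
 * the given buffer, to be processed when the buffer is written.
 */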
13621void
13622softdep_buf_append(bp, wkhd)
13623	struct buf *bp;
13624	struct workhead *wkhd;
13625{
13626	struct worklist *wk;
13627	struct ufsmount *ump;
13628
13629	if ((wk = LIST_FIRST(wkhd)) == NULL)
13630		return;
13631	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13632	    ("softdep_buf_append called on non-softdep filesystem"));
13633	ump = VFSTOUFS(wk->wk_mp);
13634	ACQUIRE_LOCK(ump);
13635	while ((wk = LIST_FIRST(wkhd)) != NULL) {
13636		WORKLIST_REMOVE(wk);
13637		WORKLIST_INSERT(&bp->b_dep, wk);
13638	}
13639	FREE_LOCK(ump);
13641}
13642
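/*
 * Read the block holding the specified inode and attach the given work
 * items to it.  If the read fails, the work items are released instead.
 */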
13643void
13644softdep_inode_append(ip, cred, wkhd)
13645	struct inode *ip;
13646	struct ucred *cred;
13647	struct workhead *wkhd;
13648{
13649	struct buf *bp;
13650	struct fs *fs;
13651	int error;
13652
13653	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
13654	    ("softdep_inode_append called on non-softdep filesystem"));
13655	fs = ip->i_fs;
13656	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
13657	    (int)fs->fs_bsize, cred, &bp);
13658	if (error) {
13659		bqrelse(bp);
13660		softdep_freework(wkhd);
13661		return;
13662	}
13663	softdep_buf_append(bp, wkhd);
13664	bqrelse(bp);
13665}
13666
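/*
 * Process and release the journal work items on the specified list.
 */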
13667void
13668softdep_freework(wkhd)
13669	struct workhead *wkhd;
13670{
13671	struct worklist *wk;
13672	struct ufsmount *ump;
13673
13674	if ((wk = LIST_FIRST(wkhd)) == NULL)
13675		return;
13676	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13677	    ("softdep_freework called on non-softdep filesystem"));
13678	ump = VFSTOUFS(wk->wk_mp);
13679	ACQUIRE_LOCK(ump);
13680	handle_jwork(wkhd);
13681	FREE_LOCK(ump);
13682}
13683
13684/*
13685 * Function to determine if the buffer has outstanding dependencies
13686 * that will cause a roll-back if the buffer is written. If wantcount
13687 * is set, return number of dependencies, otherwise just yes or no.
13688 */
13689static int
13690softdep_count_dependencies(bp, wantcount)
13691	struct buf *bp;
13692	int wantcount;
13693{
13694	struct worklist *wk;
13695	struct ufsmount *ump;
13696	struct bmsafemap *bmsafemap;
13697	struct freework *freework;
13698	struct inodedep *inodedep;
13699	struct indirdep *indirdep;
13700	struct freeblks *freeblks;
13701	struct allocindir *aip;
13702	struct pagedep *pagedep;
13703	struct dirrem *dirrem;
13704	struct newblk *newblk;
13705	struct mkdir *mkdir;
13706	struct diradd *dap;
13707	int i, retval;
13708
13709	retval = 0;
13710	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
13711		return (0);
13712	ump = VFSTOUFS(wk->wk_mp);
13713	ACQUIRE_LOCK(ump);
13714	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
13715		switch (wk->wk_type) {
13716
13717		case D_INODEDEP:
13718			inodedep = WK_INODEDEP(wk);
13719			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
13720				/* bitmap allocation dependency */
13721				retval += 1;
13722				if (!wantcount)
13723					goto out;
13724			}
13725			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
13726				/* direct block pointer dependency */
13727				retval += 1;
13728				if (!wantcount)
13729					goto out;
13730			}
13731			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
13732				/* ext block pointer dependency */
13733				retval += 1;
13734				if (!wantcount)
13735					goto out;
13736			}
13737			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
13738				/* Add reference dependency. */
13739				retval += 1;
13740				if (!wantcount)
13741					goto out;
13742			}
13743			continue;
13744
13745		case D_INDIRDEP:
13746			indirdep = WK_INDIRDEP(wk);
13747
13748			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
13749				/* indirect truncation dependency */
13750				retval += 1;
13751				if (!wantcount)
13752					goto out;
13753			}
13754
13755			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13756				/* indirect block pointer dependency */
13757				retval += 1;
13758				if (!wantcount)
13759					goto out;
13760			}
13761			continue;
13762
13763		case D_PAGEDEP:
13764			pagedep = WK_PAGEDEP(wk);
13765			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
13766				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
13767					/* Journal remove ref dependency. */
13768					retval += 1;
13769					if (!wantcount)
13770						goto out;
13771				}
13772			}
13773			for (i = 0; i < DAHASHSZ; i++) {
13775				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
13776					/* directory entry dependency */
13777					retval += 1;
13778					if (!wantcount)
13779						goto out;
13780				}
13781			}
13782			continue;
13783
13784		case D_BMSAFEMAP:
13785			bmsafemap = WK_BMSAFEMAP(wk);
13786			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
13787				/* Add reference dependency. */
13788				retval += 1;
13789				if (!wantcount)
13790					goto out;
13791			}
13792			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
13793				/* Allocate block dependency. */
13794				retval += 1;
13795				if (!wantcount)
13796					goto out;
13797			}
13798			continue;
13799
13800		case D_FREEBLKS:
13801			freeblks = WK_FREEBLKS(wk);
13802			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
13803				/* Freeblk journal dependency. */
13804				retval += 1;
13805				if (!wantcount)
13806					goto out;
13807			}
13808			continue;
13809
13810		case D_ALLOCDIRECT:
13811		case D_ALLOCINDIR:
13812			newblk = WK_NEWBLK(wk);
13813			if (newblk->nb_jnewblk) {
13814				/* Journal allocate dependency. */
13815				retval += 1;
13816				if (!wantcount)
13817					goto out;
13818			}
13819			continue;
13820
13821		case D_MKDIR:
13822			mkdir = WK_MKDIR(wk);
13823			if (mkdir->md_jaddref) {
13824				/* Journal reference dependency. */
13825				retval += 1;
13826				if (!wantcount)
13827					goto out;
13828			}
13829			continue;
13830
13831		case D_FREEWORK:
13832		case D_FREEDEP:
13833		case D_JSEGDEP:
13834		case D_JSEG:
13835		case D_SBDEP:
13836			/* never a dependency on these blocks */
13837			continue;
13838
13839		default:
13840			panic("softdep_count_dependencies: Unexpected type %s",
13841			    TYPENAME(wk->wk_type));
13842			/* NOTREACHED */
13843		}
13844	}
13845out:
13846	FREE_LOCK(ump);
13847	return (retval);
13848}
13849
13850/*
13851 * Acquire exclusive access to a buffer.
13852 * Must be called with the lock argument (an rwlock) write-locked.
13853 * Return the acquired buffer, or NULL on failure.
13854 */
13855static struct buf *
13856getdirtybuf(bp, lock, waitfor)
13857	struct buf *bp;
13858	struct rwlock *lock;
13859	int waitfor;
13860{
13861	int error;
13862
13863	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13864		if (waitfor != MNT_WAIT)
13865			return (NULL);
13866		error = BUF_LOCK(bp,
13867		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
13868		/*
13869		 * Even if we successfully acquire bp here, we have dropped
13870		 * the lock, which may violate our guarantee.
13871		 */
13872		if (error == 0)
13873			BUF_UNLOCK(bp);
13874		else if (error != ENOLCK)
13875			panic("getdirtybuf: inconsistent lock: %d", error);
13876		rw_wlock(lock);
13877		return (NULL);
13878	}
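	/*
	 * A background write of this buffer is in progress; it must
	 * complete before the buffer can be handed out.
	 */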
13879	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13880		if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
13881			rw_wunlock(lock);
13882			BO_LOCK(bp->b_bufobj);
13883			BUF_UNLOCK(bp);
13884			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13885				bp->b_vflags |= BV_BKGRDWAIT;
13886				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
13887				       PRIBIO | PDROP, "getbuf", 0);
13888			} else
13889				BO_UNLOCK(bp->b_bufobj);
13890			rw_wlock(lock);
13891			return (NULL);
13892		}
13893		BUF_UNLOCK(bp);
13894		if (waitfor != MNT_WAIT)
13895			return (NULL);
13896		/*
13897		 * The lock argument must be bp->b_vp's mutex in
13898		 * this case.
13899		 */
13900#ifdef	DEBUG_VFS_LOCKS
13901		if (bp->b_vp->v_type != VCHR)
13902			ASSERT_BO_WLOCKED(bp->b_bufobj);
13903#endif
13904		bp->b_vflags |= BV_BKGRDWAIT;
13905		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
13906		return (NULL);
13907	}
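	/* Only delayed-write (dirty) buffers are of interest here. */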
13908	if ((bp->b_flags & B_DELWRI) == 0) {
13909		BUF_UNLOCK(bp);
13910		return (NULL);
13911	}
13912	bremfree(bp);
13913	return (bp);
13914}
13915
13917/*
13918 * Check if it is safe to suspend the file system now.  On entry,
13919 * the bufobj lock for devvp should be held.  Return 0 with
13920 * the mount interlock held if the file system can be suspended now,
13921 * otherwise return EAGAIN with the mount interlock held.
13922 */
13923int
13924softdep_check_suspend(struct mount *mp,
13925		      struct vnode *devvp,
13926		      int softdep_depcnt,
13927		      int softdep_accdepcnt,
13928		      int secondary_writes,
13929		      int secondary_accwrites)
13930{
13931	struct bufobj *bo;
13932	struct ufsmount *ump;
13933	struct inodedep *inodedep;
13934	int error, unlinked;
13935
13936	bo = &devvp->v_bufobj;
13937	ASSERT_BO_WLOCKED(bo);
13938
13939	/*
13940	 * If we are not running with soft updates, then we need only
13941	 * deal with secondary writes as we try to suspend.
13942	 */
13943	if (MOUNTEDSOFTDEP(mp) == 0) {
13944		MNT_ILOCK(mp);
13945		while (mp->mnt_secondary_writes != 0) {
13946			BO_UNLOCK(bo);
13947			msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
13948			    (PUSER - 1) | PDROP, "secwr", 0);
13949			BO_LOCK(bo);
13950			MNT_ILOCK(mp);
13951		}
13952
13953		/*
13954		 * Reasons for needing more work before suspend:
13955		 * - Dirty buffers on devvp.
13956	 * - Secondary writes occurred after start of vnode sync loop.
13957		 */
13958		error = 0;
13959		if (bo->bo_numoutput > 0 ||
13960		    bo->bo_dirty.bv_cnt > 0 ||
13961		    secondary_writes != 0 ||
13962		    mp->mnt_secondary_writes != 0 ||
13963		    secondary_accwrites != mp->mnt_secondary_accwrites)
13964			error = EAGAIN;
13965		BO_UNLOCK(bo);
13966		return (error);
13967	}
13968
13969	/*
13970	 * If we are running with soft updates, then we need to coordinate
13971	 * with them as we try to suspend.
13972	 */
13973	ump = VFSTOUFS(mp);
13974	for (;;) {
13975		if (!TRY_ACQUIRE_LOCK(ump)) {
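			/*
			 * Wait for the softdep lock without holding the
			 * bufobj lock, then retry to preserve lock order.
			 */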
13976			BO_UNLOCK(bo);
13977			ACQUIRE_LOCK(ump);
13978			FREE_LOCK(ump);
13979			BO_LOCK(bo);
13980			continue;
13981		}
13982		MNT_ILOCK(mp);
13983		if (mp->mnt_secondary_writes != 0) {
13984			FREE_LOCK(ump);
13985			BO_UNLOCK(bo);
13986			msleep(&mp->mnt_secondary_writes,
13987			       MNT_MTX(mp),
13988			       (PUSER - 1) | PDROP, "secwr", 0);
13989			BO_LOCK(bo);
13990			continue;
13991		}
13992		break;
13993	}
13994
13995	unlinked = 0;
13996	if (MOUNTEDSUJ(mp)) {
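		/*
		 * Count unlinked inodes whose removal is fully recorded on
		 * disk; the check below allows suspension when these are
		 * the only remaining dependencies.
		 */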
13997		for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked);
13998		    inodedep != NULL;
13999		    inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
14000			if ((inodedep->id_state & (UNLINKED | UNLINKLINKS |
14001			    UNLINKONLIST)) != (UNLINKED | UNLINKLINKS |
14002			    UNLINKONLIST) ||
14003			    !check_inodedep_free(inodedep))
14004				continue;
14005			unlinked++;
14006		}
14007	}
14008
14009	/*
14010	 * Reasons for needing more work before suspend:
14011	 * - Dirty buffers on devvp.
14012	 * - Softdep activity occurred after start of vnode sync loop.
14013	 * - Secondary writes occurred after start of vnode sync loop.
14014	 */
14015	error = 0;
14016	if (bo->bo_numoutput > 0 ||
14017	    bo->bo_dirty.bv_cnt > 0 ||
14018	    softdep_depcnt != unlinked ||
14019	    ump->softdep_deps != unlinked ||
14020	    softdep_accdepcnt != ump->softdep_accdeps ||
14021	    secondary_writes != 0 ||
14022	    mp->mnt_secondary_writes != 0 ||
14023	    secondary_accwrites != mp->mnt_secondary_accwrites)
14024		error = EAGAIN;
14025	FREE_LOCK(ump);
14026	BO_UNLOCK(bo);
14027	return (error);
14028}
14029
14031/*
14032 * Get the number of dependency structures for the file system, both
14033 * the current number and the total number allocated.  These will
14034 * later be used to detect that softdep processing has occurred.
14035 */
14036void
14037softdep_get_depcounts(struct mount *mp,
14038		      int *softdep_depsp,
14039		      int *softdep_accdepsp)
14040{
14041	struct ufsmount *ump;
14042
14043	if (MOUNTEDSOFTDEP(mp) == 0) {
14044		*softdep_depsp = 0;
14045		*softdep_accdepsp = 0;
14046		return;
14047	}
14048	ump = VFSTOUFS(mp);
14049	ACQUIRE_LOCK(ump);
14050	*softdep_depsp = ump->softdep_deps;
14051	*softdep_accdepsp = ump->softdep_accdeps;
14052	FREE_LOCK(ump);
14053}
14054
14055/*
14056 * Wait for pending output on a vnode to complete.
14057 * Must be called with the vnode locked and its bufobj lock held.
14058 *
14059 * XXX: Should just be a call to bufobj_wwait().
14060 */
14061static void
14062drain_output(vp)
14063	struct vnode *vp;
14064{
14065	struct bufobj *bo;
14066
14067	bo = &vp->v_bufobj;
14068	ASSERT_VOP_LOCKED(vp, "drain_output");
14069	ASSERT_BO_WLOCKED(bo);
14070
14071	while (bo->bo_numoutput) {
14072		bo->bo_flag |= BO_WWAIT;
14073		msleep((caddr_t)&bo->bo_numoutput,
14074		    BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0);
14075	}
14076}
14077
14078/*
14079 * Called whenever a buffer that is being invalidated or reallocated
14080 * contains dependencies. This should only happen if an I/O error has
14081 * occurred. The routine is called with the buffer locked.
14082 */
14083static void
14084softdep_deallocate_dependencies(bp)
14085	struct buf *bp;
14086{
14087
14088	if ((bp->b_ioflags & BIO_ERROR) == 0)
14089		panic("softdep_deallocate_dependencies: dangling deps");
14090	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
14091		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
14092	else
14093		printf("softdep_deallocate_dependencies: "
14094		    "got error %d while accessing filesystem\n", bp->b_error);
14095	if (bp->b_error != ENXIO)
14096		panic("softdep_deallocate_dependencies: unrecovered I/O error");
14097}
14098
14099/*
14100 * Function to handle asynchronous write errors in the filesystem.
14101 */
14102static void
14103softdep_error(func, error)
14104	char *func;
14105	int error;
14106{
14107
14108	/* XXX should do something better! */
14109	printf("%s: got error %d while accessing filesystem\n", func, error);
14110}
14111
14112#ifdef DDB
14113
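/*
 * Print a summary of the given inodedep; if verbose is set, also show
 * the heads of its dependency lists.
 */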
14114static void
14115inodedep_print(struct inodedep *inodedep, int verbose)
14116{
14117	db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
14118	    " saveino %p\n",
14119	    inodedep, inodedep->id_fs, inodedep->id_state,
14120	    (intmax_t)inodedep->id_ino,
14121	    (intmax_t)fsbtodb(inodedep->id_fs,
14122	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
14123	    inodedep->id_nlinkdelta, inodedep->id_savednlink,
14124	    inodedep->id_savedino1);
14125
14126	if (verbose == 0)
14127		return;
14128
14129	db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
14130	    "mkdiradd %p\n",
14131	    LIST_FIRST(&inodedep->id_pendinghd),
14132	    LIST_FIRST(&inodedep->id_bufwait),
14133	    LIST_FIRST(&inodedep->id_inowait),
14134	    TAILQ_FIRST(&inodedep->id_inoreflst),
14135	    inodedep->id_mkdiradd);
14136	db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
14137	    TAILQ_FIRST(&inodedep->id_inoupdt),
14138	    TAILQ_FIRST(&inodedep->id_newinoupdt),
14139	    TAILQ_FIRST(&inodedep->id_extupdt),
14140	    TAILQ_FIRST(&inodedep->id_newextupdt));
14141}
14142
14143DB_SHOW_COMMAND(inodedep, db_show_inodedep)
14144{
14145
14146	if (have_addr == 0) {
14147		db_printf("Address required\n");
14148		return;
14149	}
14150	inodedep_print((struct inodedep *)addr, 1);
14151}
14152
14153DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
14154{
14155	struct inodedep_hashhead *inodedephd;
14156	struct inodedep *inodedep;
14157	struct ufsmount *ump;
14158	int cnt;
14159
14160	if (have_addr == 0) {
14161		db_printf("Address required\n");
14162		return;
14163	}
14164	ump = (struct ufsmount *)addr;
14165	for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) {
14166		inodedephd = &ump->inodedep_hashtbl[cnt];
14167		LIST_FOREACH(inodedep, inodedephd, id_hash) {
14168			inodedep_print(inodedep, 0);
14169		}
14170	}
14171}
14172
14173DB_SHOW_COMMAND(worklist, db_show_worklist)
14174{
14175	struct worklist *wk;
14176
14177	if (have_addr == 0) {
14178		db_printf("Address required\n");
14179		return;
14180	}
14181	wk = (struct worklist *)addr;
14182	db_printf("worklist: %p type %s state 0x%X\n",
14183	    wk, TYPENAME(wk->wk_type), wk->wk_state);
14184}
14185
14186DB_SHOW_COMMAND(workhead, db_show_workhead)
14187{
14188	struct workhead *wkhd;
14189	struct worklist *wk;
14190	int i;
14191
14192	if (have_addr == 0) {
14193		db_printf("Address required\n");
14194		return;
14195	}
14196	wkhd = (struct workhead *)addr;
14197	wk = LIST_FIRST(wkhd);
14198	for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
14199		db_printf("worklist: %p type %s state 0x%X\n",
14200		    wk, TYPENAME(wk->wk_type), wk->wk_state);
14201	if (i == 100)
14202		db_printf("workhead overflow\n");
14204}
14205
14207DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
14208{
14209	struct mkdirlist *mkdirlisthd;
14210	struct jaddref *jaddref;
14211	struct diradd *diradd;
14212	struct mkdir *mkdir;
14213
14214	if (have_addr == 0) {
14215		db_printf("Address required\n");
14216		return;
14217	}
14218	mkdirlisthd = (struct mkdirlist *)addr;
14219	LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
14220		diradd = mkdir->md_diradd;
14221		db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
14222		    mkdir, mkdir->md_state, diradd, diradd->da_state);
14223		if ((jaddref = mkdir->md_jaddref) != NULL)
14224			db_printf(" jaddref %p jaddref state 0x%X",
14225			    jaddref, jaddref->ja_state);
14226		db_printf("\n");
14227	}
14228}
14229
14230/* exported to ffs_vfsops.c */
14231extern void db_print_ffs(struct ufsmount *ump);
14232void
14233db_print_ffs(struct ufsmount *ump)
14234{
14235	db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
14236	    ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
14237	    ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
14238	    ump->softdep_deps, ump->softdep_req);
14239}
14240
14241#endif /* DDB */
14242
14243#endif /* SOFTUPDATES */
14244