ffs_softdep.c revision 298527
1158722Sflz/*-
2158722Sflz * Copyright 1998, 2000 Marshall Kirk McKusick.
3158722Sflz * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
4158722Sflz * All rights reserved.
5158722Sflz *
6158722Sflz * The soft updates code is derived from the appendix of a University
7158722Sflz * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
8158722Sflz * "Soft Updates: A Solution to the Metadata Update Problem in File
9158722Sflz * Systems", CSE-TR-254-95, August 1995).
10158722Sflz *
11158722Sflz * Further information about soft updates can be obtained from:
12158722Sflz *
13158722Sflz *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
14158722Sflz *	1614 Oxford Street		mckusick@mckusick.com
15158722Sflz *	Berkeley, CA 94709-1608		+1-510-843-9542
16158722Sflz *	USA
17158722Sflz *
18158722Sflz * Redistribution and use in source and binary forms, with or without
19158722Sflz * modification, are permitted provided that the following conditions
20158722Sflz * are met:
21158722Sflz *
22158722Sflz * 1. Redistributions of source code must retain the above copyright
23158722Sflz *    notice, this list of conditions and the following disclaimer.
24158722Sflz * 2. Redistributions in binary form must reproduce the above copyright
25158722Sflz *    notice, this list of conditions and the following disclaimer in the
26158722Sflz *    documentation and/or other materials provided with the distribution.
27158722Sflz *
28158722Sflz * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
29158722Sflz * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30158722Sflz * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31158722Sflz * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32158722Sflz * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33158722Sflz * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34158722Sflz * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
35158722Sflz * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36158722Sflz * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37158722Sflz * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38158722Sflz *
39165683Syar *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
40165683Syar */
41158722Sflz
42158722Sflz#include <sys/cdefs.h>
43158722Sflz__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_softdep.c 298527 2016-04-24 03:11:52Z pfg $");
44158722Sflz
45158722Sflz#include "opt_ffs.h"
46158722Sflz#include "opt_quota.h"
47158722Sflz#include "opt_ddb.h"
48158722Sflz
49158722Sflz/*
50158722Sflz * For now we want the safety net that the DEBUG flag provides.
51158722Sflz */
52208060Sdougb#ifndef DEBUG
53158722Sflz#define DEBUG
54158722Sflz#endif
55158722Sflz
56158722Sflz#include <sys/param.h>
57158722Sflz#include <sys/kernel.h>
58158722Sflz#include <sys/systm.h>
59158722Sflz#include <sys/bio.h>
60158722Sflz#include <sys/buf.h>
61158722Sflz#include <sys/kdb.h>
62158722Sflz#include <sys/kthread.h>
63158722Sflz#include <sys/ktr.h>
64158722Sflz#include <sys/limits.h>
65158722Sflz#include <sys/lock.h>
66158722Sflz#include <sys/malloc.h>
67158722Sflz#include <sys/mount.h>
68158722Sflz#include <sys/mutex.h>
69158722Sflz#include <sys/namei.h>
70158722Sflz#include <sys/priv.h>
71158722Sflz#include <sys/proc.h>
72158722Sflz#include <sys/rwlock.h>
73158722Sflz#include <sys/stat.h>
74158722Sflz#include <sys/sysctl.h>
75158722Sflz#include <sys/syslog.h>
76158722Sflz#include <sys/vnode.h>
77158722Sflz#include <sys/conf.h>
78158722Sflz
79158722Sflz#include <ufs/ufs/dir.h>
80158722Sflz#include <ufs/ufs/extattr.h>
81158722Sflz#include <ufs/ufs/quota.h>
82158722Sflz#include <ufs/ufs/inode.h>
83158722Sflz#include <ufs/ufs/ufsmount.h>
84158722Sflz#include <ufs/ffs/fs.h>
85158722Sflz#include <ufs/ffs/softdep.h>
86158722Sflz#include <ufs/ffs/ffs_extern.h>
87158722Sflz#include <ufs/ufs/ufs_extern.h>
88158722Sflz
89158722Sflz#include <vm/vm.h>
90158722Sflz#include <vm/vm_extern.h>
91158722Sflz#include <vm/vm_object.h>
92158722Sflz
93158722Sflz#include <geom/geom.h>
94158722Sflz
95158722Sflz#include <ddb/ddb.h>
96158722Sflz
97158722Sflz#define	KTR_SUJ	0	/* Define to KTR_SPARE. */
98158722Sflz
99158722Sflz#ifndef SOFTUPDATES
100158722Sflz
101158722Sflzint
102158722Sflzsoftdep_flushfiles(oldmnt, flags, td)
103158722Sflz	struct mount *oldmnt;
104158722Sflz	int flags;
105158722Sflz	struct thread *td;
106158722Sflz{
107158722Sflz
108158722Sflz	panic("softdep_flushfiles called");
109158722Sflz}
110158722Sflz
111158722Sflzint
112158722Sflzsoftdep_mount(devvp, mp, fs, cred)
113158722Sflz	struct vnode *devvp;
114158722Sflz	struct mount *mp;
115158722Sflz	struct fs *fs;
116158722Sflz	struct ucred *cred;
117158722Sflz{
118158722Sflz
119158722Sflz	return (0);
120158722Sflz}
121158722Sflz
122158722Sflzvoid
123158722Sflzsoftdep_initialize()
124165683Syar{
125165683Syar
126165683Syar	return;
127158722Sflz}
128158722Sflz
129158722Sflzvoid
130158722Sflzsoftdep_uninitialize()
131158722Sflz{
132158722Sflz
133158722Sflz	return;
134158722Sflz}
135158722Sflz
136158722Sflzvoid
137158722Sflzsoftdep_unmount(mp)
138158722Sflz	struct mount *mp;
139158722Sflz{
140158722Sflz
141158722Sflz	panic("softdep_unmount called");
142158722Sflz}
143158722Sflz
144158722Sflzvoid
145158722Sflzsoftdep_setup_sbupdate(ump, fs, bp)
146158722Sflz	struct ufsmount *ump;
147158722Sflz	struct fs *fs;
148158722Sflz	struct buf *bp;
149158722Sflz{
150158722Sflz
151158722Sflz	panic("softdep_setup_sbupdate called");
152158722Sflz}
153158722Sflz
154158722Sflzvoid
155158722Sflzsoftdep_setup_inomapdep(bp, ip, newinum, mode)
156158722Sflz	struct buf *bp;
157158722Sflz	struct inode *ip;
158158722Sflz	ino_t newinum;
159158722Sflz	int mode;
160158722Sflz{
161158722Sflz
162158722Sflz	panic("softdep_setup_inomapdep called");
163158722Sflz}
164158722Sflz
165158722Sflzvoid
166158722Sflzsoftdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
167158722Sflz	struct buf *bp;
168158722Sflz	struct mount *mp;
169158722Sflz	ufs2_daddr_t newblkno;
170158722Sflz	int frags;
171158722Sflz	int oldfrags;
172158722Sflz{
173158722Sflz
174158722Sflz	panic("softdep_setup_blkmapdep called");
175158722Sflz}
176158722Sflz
177158722Sflzvoid
178158722Sflzsoftdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
179158722Sflz	struct inode *ip;
180158722Sflz	ufs_lbn_t lbn;
181158722Sflz	ufs2_daddr_t newblkno;
182158722Sflz	ufs2_daddr_t oldblkno;
183158722Sflz	long newsize;
184158722Sflz	long oldsize;
185158722Sflz	struct buf *bp;
186158722Sflz{
187158722Sflz
188158722Sflz	panic("softdep_setup_allocdirect called");
189158722Sflz}
190158722Sflz
191158722Sflzvoid
192158722Sflzsoftdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
193158722Sflz	struct inode *ip;
194158722Sflz	ufs_lbn_t lbn;
195158722Sflz	ufs2_daddr_t newblkno;
196158722Sflz	ufs2_daddr_t oldblkno;
197158722Sflz	long newsize;
198158722Sflz	long oldsize;
199158722Sflz	struct buf *bp;
200158722Sflz{
201158722Sflz
202158722Sflz	panic("softdep_setup_allocext called");
203158722Sflz}
204158722Sflz
205158722Sflzvoid
206158722Sflzsoftdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
207158722Sflz	struct inode *ip;
208158722Sflz	ufs_lbn_t lbn;
209158722Sflz	struct buf *bp;
210158722Sflz	int ptrno;
211158722Sflz	ufs2_daddr_t newblkno;
212158722Sflz	ufs2_daddr_t oldblkno;
213158722Sflz	struct buf *nbp;
214158722Sflz{
215264438Sdteske
216264438Sdteske	panic("softdep_setup_allocindir_page called");
217264438Sdteske}
218264438Sdteske
219290277Sdteskevoid
220290277Sdteskesoftdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
221264438Sdteske	struct buf *nbp;
222158722Sflz	struct inode *ip;
223264438Sdteske	struct buf *bp;
224158722Sflz	int ptrno;
225208060Sdougb	ufs2_daddr_t newblkno;
226158722Sflz{
227
228	panic("softdep_setup_allocindir_meta called");
229}
230
231void
232softdep_journal_freeblocks(ip, cred, length, flags)
233	struct inode *ip;
234	struct ucred *cred;
235	off_t length;
236	int flags;
237{
238
239	panic("softdep_journal_freeblocks called");
240}
241
242void
243softdep_journal_fsync(ip)
244	struct inode *ip;
245{
246
247	panic("softdep_journal_fsync called");
248}
249
250void
251softdep_setup_freeblocks(ip, length, flags)
252	struct inode *ip;
253	off_t length;
254	int flags;
255{
256
257	panic("softdep_setup_freeblocks called");
258}
259
260void
261softdep_freefile(pvp, ino, mode)
262		struct vnode *pvp;
263		ino_t ino;
264		int mode;
265{
266
267	panic("softdep_freefile called");
268}
269
270int
271softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
272	struct buf *bp;
273	struct inode *dp;
274	off_t diroffset;
275	ino_t newinum;
276	struct buf *newdirbp;
277	int isnewblk;
278{
279
280	panic("softdep_setup_directory_add called");
281}
282
283void
284softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
285	struct buf *bp;
286	struct inode *dp;
287	caddr_t base;
288	caddr_t oldloc;
289	caddr_t newloc;
290	int entrysize;
291{
292
293	panic("softdep_change_directoryentry_offset called");
294}
295
296void
297softdep_setup_remove(bp, dp, ip, isrmdir)
298	struct buf *bp;
299	struct inode *dp;
300	struct inode *ip;
301	int isrmdir;
302{
303
304	panic("softdep_setup_remove called");
305}
306
307void
308softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
309	struct buf *bp;
310	struct inode *dp;
311	struct inode *ip;
312	ino_t newinum;
313	int isrmdir;
314{
315
316	panic("softdep_setup_directory_change called");
317}
318
319void
320softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
321	struct mount *mp;
322	struct buf *bp;
323	ufs2_daddr_t blkno;
324	int frags;
325	struct workhead *wkhd;
326{
327
328	panic("%s called", __FUNCTION__);
329}
330
331void
332softdep_setup_inofree(mp, bp, ino, wkhd)
333	struct mount *mp;
334	struct buf *bp;
335	ino_t ino;
336	struct workhead *wkhd;
337{
338
339	panic("%s called", __FUNCTION__);
340}
341
342void
343softdep_setup_unlink(dp, ip)
344	struct inode *dp;
345	struct inode *ip;
346{
347
348	panic("%s called", __FUNCTION__);
349}
350
351void
352softdep_setup_link(dp, ip)
353	struct inode *dp;
354	struct inode *ip;
355{
356
357	panic("%s called", __FUNCTION__);
358}
359
360void
361softdep_revert_link(dp, ip)
362	struct inode *dp;
363	struct inode *ip;
364{
365
366	panic("%s called", __FUNCTION__);
367}
368
369void
370softdep_setup_rmdir(dp, ip)
371	struct inode *dp;
372	struct inode *ip;
373{
374
375	panic("%s called", __FUNCTION__);
376}
377
378void
379softdep_revert_rmdir(dp, ip)
380	struct inode *dp;
381	struct inode *ip;
382{
383
384	panic("%s called", __FUNCTION__);
385}
386
387void
388softdep_setup_create(dp, ip)
389	struct inode *dp;
390	struct inode *ip;
391{
392
393	panic("%s called", __FUNCTION__);
394}
395
396void
397softdep_revert_create(dp, ip)
398	struct inode *dp;
399	struct inode *ip;
400{
401
402	panic("%s called", __FUNCTION__);
403}
404
405void
406softdep_setup_mkdir(dp, ip)
407	struct inode *dp;
408	struct inode *ip;
409{
410
411	panic("%s called", __FUNCTION__);
412}
413
414void
415softdep_revert_mkdir(dp, ip)
416	struct inode *dp;
417	struct inode *ip;
418{
419
420	panic("%s called", __FUNCTION__);
421}
422
423void
424softdep_setup_dotdot_link(dp, ip)
425	struct inode *dp;
426	struct inode *ip;
427{
428
429	panic("%s called", __FUNCTION__);
430}
431
432int
433softdep_prealloc(vp, waitok)
434	struct vnode *vp;
435	int waitok;
436{
437
438	panic("%s called", __FUNCTION__);
439}
440
441int
442softdep_journal_lookup(mp, vpp)
443	struct mount *mp;
444	struct vnode **vpp;
445{
446
447	return (ENOENT);
448}
449
450void
451softdep_change_linkcnt(ip)
452	struct inode *ip;
453{
454
455	panic("softdep_change_linkcnt called");
456}
457
458void
459softdep_load_inodeblock(ip)
460	struct inode *ip;
461{
462
463	panic("softdep_load_inodeblock called");
464}
465
466void
467softdep_update_inodeblock(ip, bp, waitfor)
468	struct inode *ip;
469	struct buf *bp;
470	int waitfor;
471{
472
473	panic("softdep_update_inodeblock called");
474}
475
476int
477softdep_fsync(vp)
478	struct vnode *vp;	/* the "in_core" copy of the inode */
479{
480
481	return (0);
482}
483
484void
485softdep_fsync_mountdev(vp)
486	struct vnode *vp;
487{
488
489	return;
490}
491
492int
493softdep_flushworklist(oldmnt, countp, td)
494	struct mount *oldmnt;
495	int *countp;
496	struct thread *td;
497{
498
499	*countp = 0;
500	return (0);
501}
502
503int
504softdep_sync_metadata(struct vnode *vp)
505{
506
507	panic("softdep_sync_metadata called");
508}
509
510int
511softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
512{
513
514	panic("softdep_sync_buf called");
515}
516
517int
518softdep_slowdown(vp)
519	struct vnode *vp;
520{
521
522	panic("softdep_slowdown called");
523}
524
525int
526softdep_request_cleanup(fs, vp, cred, resource)
527	struct fs *fs;
528	struct vnode *vp;
529	struct ucred *cred;
530	int resource;
531{
532
533	return (0);
534}
535
536int
537softdep_check_suspend(struct mount *mp,
538		      struct vnode *devvp,
539		      int softdep_depcnt,
540		      int softdep_accdepcnt,
541		      int secondary_writes,
542		      int secondary_accwrites)
543{
544	struct bufobj *bo;
545	int error;
546
547	(void) softdep_depcnt,
548	(void) softdep_accdepcnt;
549
550	bo = &devvp->v_bufobj;
551	ASSERT_BO_WLOCKED(bo);
552
553	MNT_ILOCK(mp);
554	while (mp->mnt_secondary_writes != 0) {
555		BO_UNLOCK(bo);
556		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
557		    (PUSER - 1) | PDROP, "secwr", 0);
558		BO_LOCK(bo);
559		MNT_ILOCK(mp);
560	}
561
562	/*
563	 * Reasons for needing more work before suspend:
564	 * - Dirty buffers on devvp.
565	 * - Secondary writes occurred after start of vnode sync loop
566	 */
567	error = 0;
568	if (bo->bo_numoutput > 0 ||
569	    bo->bo_dirty.bv_cnt > 0 ||
570	    secondary_writes != 0 ||
571	    mp->mnt_secondary_writes != 0 ||
572	    secondary_accwrites != mp->mnt_secondary_accwrites)
573		error = EAGAIN;
574	BO_UNLOCK(bo);
575	return (error);
576}
577
578void
579softdep_get_depcounts(struct mount *mp,
580		      int *softdepactivep,
581		      int *softdepactiveaccp)
582{
583	(void) mp;
584	*softdepactivep = 0;
585	*softdepactiveaccp = 0;
586}
587
588void
589softdep_buf_append(bp, wkhd)
590	struct buf *bp;
591	struct workhead *wkhd;
592{
593
594	panic("softdep_buf_appendwork called");
595}
596
597void
598softdep_inode_append(ip, cred, wkhd)
599	struct inode *ip;
600	struct ucred *cred;
601	struct workhead *wkhd;
602{
603
604	panic("softdep_inode_appendwork called");
605}
606
607void
608softdep_freework(wkhd)
609	struct workhead *wkhd;
610{
611
612	panic("softdep_freework called");
613}
614
615#else
616
617FEATURE(softupdates, "FFS soft-updates support");
618
619static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
620    "soft updates stats");
621static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
622    "total dependencies allocated");
623static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
624    "high use dependencies allocated");
625static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
626    "current dependencies allocated");
627static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
628    "current dependencies written");
629
630unsigned long dep_current[D_LAST + 1];
631unsigned long dep_highuse[D_LAST + 1];
632unsigned long dep_total[D_LAST + 1];
633unsigned long dep_write[D_LAST + 1];
634
635#define	SOFTDEP_TYPE(type, str, long)					\
636    static MALLOC_DEFINE(M_ ## type, #str, long);			\
637    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
638	&dep_total[D_ ## type], 0, "");					\
639    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, 	\
640	&dep_current[D_ ## type], 0, "");				\
641    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, 	\
642	&dep_highuse[D_ ## type], 0, "");				\
643    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, 	\
644	&dep_write[D_ ## type], 0, "");
645
646SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
647SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
648SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
649    "Block or frag allocated from cyl group map");
650SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
651SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
652SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
653SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
654SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
655SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
656SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
657SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
658SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
659SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
660SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
661SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
662SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
663SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
664SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
665SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
666SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
667SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
668SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
669SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
670SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
671SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
672SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
673SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");
674
675static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");
676
677static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
678static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
679static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data");
680
681#define M_SOFTDEP_FLAGS	(M_WAITOK)
682
683/*
684 * translate from workitem type to memory type
685 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
686 */
687static struct malloc_type *memtype[] = {
688	M_PAGEDEP,
689	M_INODEDEP,
690	M_BMSAFEMAP,
691	M_NEWBLK,
692	M_ALLOCDIRECT,
693	M_INDIRDEP,
694	M_ALLOCINDIR,
695	M_FREEFRAG,
696	M_FREEBLKS,
697	M_FREEFILE,
698	M_DIRADD,
699	M_MKDIR,
700	M_DIRREM,
701	M_NEWDIRBLK,
702	M_FREEWORK,
703	M_FREEDEP,
704	M_JADDREF,
705	M_JREMREF,
706	M_JMVREF,
707	M_JNEWBLK,
708	M_JFREEBLK,
709	M_JFREEFRAG,
710	M_JSEG,
711	M_JSEGDEP,
712	M_SBDEP,
713	M_JTRUNC,
714	M_JFSYNC,
715	M_SENTINEL
716};
717
718#define DtoM(type) (memtype[type])
719
720/*
721 * Names of malloc types.
722 */
723#define TYPENAME(type)  \
724	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
725/*
726 * End system adaptation definitions.
727 */
728
729#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
730#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)
731
732/*
733 * Internal function prototypes.
734 */
735static	void check_clear_deps(struct mount *);
736static	void softdep_error(char *, int);
737static	int softdep_process_worklist(struct mount *, int);
738static	int softdep_waitidle(struct mount *, int);
739static	void drain_output(struct vnode *);
740static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
741static	int check_inodedep_free(struct inodedep *);
742static	void clear_remove(struct mount *);
743static	void clear_inodedeps(struct mount *);
744static	void unlinked_inodedep(struct mount *, struct inodedep *);
745static	void clear_unlinked_inodedep(struct inodedep *);
746static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
747static	int flush_pagedep_deps(struct vnode *, struct mount *,
748	    struct diraddhd *);
749static	int free_pagedep(struct pagedep *);
750static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
751static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
752static	int flush_deplist(struct allocdirectlst *, int, int *);
753static	int sync_cgs(struct mount *, int);
754static	int handle_written_filepage(struct pagedep *, struct buf *);
755static	int handle_written_sbdep(struct sbdep *, struct buf *);
756static	void initiate_write_sbdep(struct sbdep *);
757static	void diradd_inode_written(struct diradd *, struct inodedep *);
758static	int handle_written_indirdep(struct indirdep *, struct buf *,
759	    struct buf**);
760static	int handle_written_inodeblock(struct inodedep *, struct buf *);
761static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
762	    uint8_t *);
763static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
764static	void handle_written_jaddref(struct jaddref *);
765static	void handle_written_jremref(struct jremref *);
766static	void handle_written_jseg(struct jseg *, struct buf *);
767static	void handle_written_jnewblk(struct jnewblk *);
768static	void handle_written_jblkdep(struct jblkdep *);
769static	void handle_written_jfreefrag(struct jfreefrag *);
770static	void complete_jseg(struct jseg *);
771static	void complete_jsegs(struct jseg *);
772static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
773static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
774static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
775static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
776static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
777static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
778static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
779static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
780static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
781static	inline void inoref_write(struct inoref *, struct jseg *,
782	    struct jrefrec *);
783static	void handle_allocdirect_partdone(struct allocdirect *,
784	    struct workhead *);
785static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
786	    struct workhead *);
787static	void indirdep_complete(struct indirdep *);
788static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
789static	void indirblk_insert(struct freework *);
790static	void indirblk_remove(struct freework *);
791static	void handle_allocindir_partdone(struct allocindir *);
792static	void initiate_write_filepage(struct pagedep *, struct buf *);
793static	void initiate_write_indirdep(struct indirdep*, struct buf *);
794static	void handle_written_mkdir(struct mkdir *, int);
795static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
796	    uint8_t *);
797static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
798static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
799static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
800static	void handle_workitem_freefile(struct freefile *);
801static	int handle_workitem_remove(struct dirrem *, int);
802static	struct dirrem *newdirrem(struct buf *, struct inode *,
803	    struct inode *, int, struct dirrem **);
804static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
805	    struct buf *);
806static	void cancel_indirdep(struct indirdep *, struct buf *,
807	    struct freeblks *);
808static	void free_indirdep(struct indirdep *);
809static	void free_diradd(struct diradd *, struct workhead *);
810static	void merge_diradd(struct inodedep *, struct diradd *);
811static	void complete_diradd(struct diradd *);
812static	struct diradd *diradd_lookup(struct pagedep *, int);
813static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
814	    struct jremref *);
815static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
816	    struct jremref *);
817static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
818	    struct jremref *, struct jremref *);
819static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
820	    struct jremref *);
821static	void cancel_allocindir(struct allocindir *, struct buf *bp,
822	    struct freeblks *, int);
823static	int setup_trunc_indir(struct freeblks *, struct inode *,
824	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
825static	void complete_trunc_indir(struct freework *);
826static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
827	    int);
828static	void complete_mkdir(struct mkdir *);
829static	void free_newdirblk(struct newdirblk *);
830static	void free_jremref(struct jremref *);
831static	void free_jaddref(struct jaddref *);
832static	void free_jsegdep(struct jsegdep *);
833static	void free_jsegs(struct jblocks *);
834static	void rele_jseg(struct jseg *);
835static	void free_jseg(struct jseg *, struct jblocks *);
836static	void free_jnewblk(struct jnewblk *);
837static	void free_jblkdep(struct jblkdep *);
838static	void free_jfreefrag(struct jfreefrag *);
839static	void free_freedep(struct freedep *);
840static	void journal_jremref(struct dirrem *, struct jremref *,
841	    struct inodedep *);
842static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
843static	int cancel_jaddref(struct jaddref *, struct inodedep *,
844	    struct workhead *);
845static	void cancel_jfreefrag(struct jfreefrag *);
846static	inline void setup_freedirect(struct freeblks *, struct inode *,
847	    int, int);
848static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
849static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
850	    ufs_lbn_t, int);
851static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
852static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
853static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
854static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
855static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
856static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
857	    int, int);
858static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
859static 	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
860static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
861static	void newblk_freefrag(struct newblk*);
862static	void free_newblk(struct newblk *);
863static	void cancel_allocdirect(struct allocdirectlst *,
864	    struct allocdirect *, struct freeblks *);
865static	int check_inode_unwritten(struct inodedep *);
866static	int free_inodedep(struct inodedep *);
867static	void freework_freeblock(struct freework *);
868static	void freework_enqueue(struct freework *);
869static	int handle_workitem_freeblocks(struct freeblks *, int);
870static	int handle_complete_freeblocks(struct freeblks *, int);
871static	void handle_workitem_indirblk(struct freework *);
872static	void handle_written_freework(struct freework *);
873static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
874static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
875	    struct workhead *);
876static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
877	    struct inodedep *, struct allocindir *, ufs_lbn_t);
878static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
879	    ufs2_daddr_t, ufs_lbn_t);
880static	void handle_workitem_freefrag(struct freefrag *);
881static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
882	    ufs_lbn_t);
883static	void allocdirect_merge(struct allocdirectlst *,
884	    struct allocdirect *, struct allocdirect *);
885static	struct freefrag *allocindir_merge(struct allocindir *,
886	    struct allocindir *);
887static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
888	    struct bmsafemap **);
889static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
890	    int cg, struct bmsafemap *);
891static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
892	    struct newblk **);
893static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
894static	int inodedep_find(struct inodedep_hashhead *, ino_t,
895	    struct inodedep **);
896static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
897static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
898	    int, struct pagedep **);
899static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
900	    struct pagedep **);
901static	void pause_timer(void *);
902static	int request_cleanup(struct mount *, int);
903static	void schedule_cleanup(struct mount *);
904static void softdep_ast_cleanup_proc(void);
905static	int process_worklist_item(struct mount *, int, int);
906static	void process_removes(struct vnode *);
907static	void process_truncates(struct vnode *);
908static	void jwork_move(struct workhead *, struct workhead *);
909static	void jwork_insert(struct workhead *, struct jsegdep *);
910static	void add_to_worklist(struct worklist *, int);
911static	void wake_worklist(struct worklist *);
912static	void wait_worklist(struct worklist *, char *);
913static	void remove_from_worklist(struct worklist *);
914static	void softdep_flush(void *);
915static	void softdep_flushjournal(struct mount *);
916static	int softdep_speedup(struct ufsmount *);
917static	void worklist_speedup(struct mount *);
918static	int journal_mount(struct mount *, struct fs *, struct ucred *);
919static	void journal_unmount(struct ufsmount *);
920static	int journal_space(struct ufsmount *, int);
921static	void journal_suspend(struct ufsmount *);
922static	int journal_unsuspend(struct ufsmount *ump);
923static	void softdep_prelink(struct vnode *, struct vnode *);
924static	void add_to_journal(struct worklist *);
925static	void remove_from_journal(struct worklist *);
926static	bool softdep_excess_items(struct ufsmount *, int);
927static	void softdep_process_journal(struct mount *, struct worklist *, int);
928static	struct jremref *newjremref(struct dirrem *, struct inode *,
929	    struct inode *ip, off_t, nlink_t);
930static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
931	    uint16_t);
932static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
933	    uint16_t);
934static	inline struct jsegdep *inoref_jseg(struct inoref *);
935static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
936static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
937	    ufs2_daddr_t, int);
938static	void adjust_newfreework(struct freeblks *, int);
939static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
940static	void move_newblock_dep(struct jaddref *, struct inodedep *);
941static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
942static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
943	    ufs2_daddr_t, long, ufs_lbn_t);
944static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
945	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
946static	int jwait(struct worklist *, int);
947static	struct inodedep *inodedep_lookup_ip(struct inode *);
948static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
949static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
950static	void handle_jwork(struct workhead *);
951static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
952	    struct mkdir **);
953static	struct jblocks *jblocks_create(void);
954static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
955static	void jblocks_free(struct jblocks *, struct mount *, int);
956static	void jblocks_destroy(struct jblocks *);
957static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);
958
959/*
960 * Exported softdep operations.
961 */
962static	void softdep_disk_io_initiation(struct buf *);
963static	void softdep_disk_write_complete(struct buf *);
964static	void softdep_deallocate_dependencies(struct buf *);
965static	int softdep_count_dependencies(struct buf *bp, int);
966
967/*
968 * Global lock over all of soft updates.
969 */
970static struct mtx lk;
971MTX_SYSINIT(softdep_lock, &lk, "Global Softdep Lock", MTX_DEF);
972
973#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
974#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
975#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)
976
977/*
978 * Per-filesystem soft-updates locking.
979 */
980#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
981#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
982#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
983#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
984#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
985				    RA_WLOCKED)
986
987#define	BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
988#define	BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)
989
990/*
991 * Worklist queue management.
992 * These routines require that the lock be held.
993 */
994#ifndef /* NOT */ DEBUG
995#define WORKLIST_INSERT(head, item) do {	\
996	(item)->wk_state |= ONWORKLIST;		\
997	LIST_INSERT_HEAD(head, item, wk_list);	\
998} while (0)
999#define WORKLIST_REMOVE(item) do {		\
1000	(item)->wk_state &= ~ONWORKLIST;	\
1001	LIST_REMOVE(item, wk_list);		\
1002} while (0)
1003#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
1004#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE
1005
1006#else /* DEBUG */
1007static	void worklist_insert(struct workhead *, struct worklist *, int);
1008static	void worklist_remove(struct worklist *, int);
1009
1010#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
1011#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
1012#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
1013#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)
1014
1015static void
1016worklist_insert(head, item, locked)
1017	struct workhead *head;
1018	struct worklist *item;
1019	int locked;
1020{
1021
1022	if (locked)
1023		LOCK_OWNED(VFSTOUFS(item->wk_mp));
1024	if (item->wk_state & ONWORKLIST)
1025		panic("worklist_insert: %p %s(0x%X) already on list",
1026		    item, TYPENAME(item->wk_type), item->wk_state);
1027	item->wk_state |= ONWORKLIST;
1028	LIST_INSERT_HEAD(head, item, wk_list);
1029}
1030
1031static void
1032worklist_remove(item, locked)
1033	struct worklist *item;
1034	int locked;
1035{
1036
1037	if (locked)
1038		LOCK_OWNED(VFSTOUFS(item->wk_mp));
1039	if ((item->wk_state & ONWORKLIST) == 0)
1040		panic("worklist_remove: %p %s(0x%X) not on list",
1041		    item, TYPENAME(item->wk_type), item->wk_state);
1042	item->wk_state &= ~ONWORKLIST;
1043	LIST_REMOVE(item, wk_list);
1044}
1045#endif /* DEBUG */
1046
1047/*
1048 * Merge two jsegdeps keeping only the oldest one as newer references
1049 * can't be discarded until after older references.
1050 */
1051static inline struct jsegdep *
1052jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
1053{
1054	struct jsegdep *swp;
1055
1056	if (two == NULL)
1057		return (one);
1058
1059	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
1060		swp = one;
1061		one = two;
1062		two = swp;
1063	}
1064	WORKLIST_REMOVE(&two->jd_list);
1065	free_jsegdep(two);
1066
1067	return (one);
1068}
1069
1070/*
1071 * If two freedeps are compatible free one to reduce list size.
1072 */
1073static inline struct freedep *
1074freedep_merge(struct freedep *one, struct freedep *two)
1075{
1076	if (two == NULL)
1077		return (one);
1078
1079	if (one->fd_freework == two->fd_freework) {
1080		WORKLIST_REMOVE(&two->fd_list);
1081		free_freedep(two);
1082	}
1083	return (one);
1084}
1085
1086/*
1087 * Move journal work from one list to another.  Duplicate freedeps and
1088 * jsegdeps are coalesced to keep the lists as small as possible.
1089 */
1090static void
1091jwork_move(dst, src)
1092	struct workhead *dst;
1093	struct workhead *src;
1094{
1095	struct freedep *freedep;
1096	struct jsegdep *jsegdep;
1097	struct worklist *wkn;
1098	struct worklist *wk;
1099
1100	KASSERT(dst != src,
1101	    ("jwork_move: dst == src"));
1102	freedep = NULL;
1103	jsegdep = NULL;
1104	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
1105		if (wk->wk_type == D_JSEGDEP)
1106			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1107		if (wk->wk_type == D_FREEDEP)
1108			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1109	}
1110
1111	while ((wk = LIST_FIRST(src)) != NULL) {
1112		WORKLIST_REMOVE(wk);
1113		WORKLIST_INSERT(dst, wk);
1114		if (wk->wk_type == D_JSEGDEP) {
1115			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1116			continue;
1117		}
1118		if (wk->wk_type == D_FREEDEP)
1119			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1120	}
1121}
1122
1123static void
1124jwork_insert(dst, jsegdep)
1125	struct workhead *dst;
1126	struct jsegdep *jsegdep;
1127{
1128	struct jsegdep *jsegdepn;
1129	struct worklist *wk;
1130
1131	LIST_FOREACH(wk, dst, wk_list)
1132		if (wk->wk_type == D_JSEGDEP)
1133			break;
1134	if (wk == NULL) {
1135		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1136		return;
1137	}
1138	jsegdepn = WK_JSEGDEP(wk);
1139	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
1140		WORKLIST_REMOVE(wk);
1141		free_jsegdep(jsegdepn);
1142		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1143	} else
1144		free_jsegdep(jsegdep);
1145}
1146
1147/*
1148 * Routines for tracking and managing workitems.
1149 */
1150static	void workitem_free(struct worklist *, int);
1151static	void workitem_alloc(struct worklist *, int, struct mount *);
1152static	void workitem_reassign(struct worklist *, int);
1153
1154#define	WORKITEM_FREE(item, type) \
1155	workitem_free((struct worklist *)(item), (type))
1156#define	WORKITEM_REASSIGN(item, type) \
1157	workitem_reassign((struct worklist *)(item), (type))
1158
1159static void
1160workitem_free(item, type)
1161	struct worklist *item;
1162	int type;
1163{
1164	struct ufsmount *ump;
1165
1166#ifdef DEBUG
1167	if (item->wk_state & ONWORKLIST)
1168		panic("workitem_free: %s(0x%X) still on list",
1169		    TYPENAME(item->wk_type), item->wk_state);
1170	if (item->wk_type != type && type != D_NEWBLK)
1171		panic("workitem_free: type mismatch %s != %s",
1172		    TYPENAME(item->wk_type), TYPENAME(type));
1173#endif
1174	if (item->wk_state & IOWAITING)
1175		wakeup(item);
1176	ump = VFSTOUFS(item->wk_mp);
1177	LOCK_OWNED(ump);
1178	KASSERT(ump->softdep_deps > 0,
1179	    ("workitem_free: %s: softdep_deps going negative",
1180	    ump->um_fs->fs_fsmnt));
1181	if (--ump->softdep_deps == 0 && ump->softdep_req)
1182		wakeup(&ump->softdep_deps);
1183	KASSERT(dep_current[item->wk_type] > 0,
1184	    ("workitem_free: %s: dep_current[%s] going negative",
1185	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1186	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1187	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
1188	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1189	atomic_subtract_long(&dep_current[item->wk_type], 1);
1190	ump->softdep_curdeps[item->wk_type] -= 1;
1191	free(item, DtoM(type));
1192}
1193
1194static void
1195workitem_alloc(item, type, mp)
1196	struct worklist *item;
1197	int type;
1198	struct mount *mp;
1199{
1200	struct ufsmount *ump;
1201
1202	item->wk_type = type;
1203	item->wk_mp = mp;
1204	item->wk_state = 0;
1205
1206	ump = VFSTOUFS(mp);
1207	ACQUIRE_GBLLOCK(&lk);
1208	dep_current[type]++;
1209	if (dep_current[type] > dep_highuse[type])
1210		dep_highuse[type] = dep_current[type];
1211	dep_total[type]++;
1212	FREE_GBLLOCK(&lk);
1213	ACQUIRE_LOCK(ump);
1214	ump->softdep_curdeps[type] += 1;
1215	ump->softdep_deps++;
1216	ump->softdep_accdeps++;
1217	FREE_LOCK(ump);
1218}
1219
1220static void
1221workitem_reassign(item, newtype)
1222	struct worklist *item;
1223	int newtype;
1224{
1225	struct ufsmount *ump;
1226
1227	ump = VFSTOUFS(item->wk_mp);
1228	LOCK_OWNED(ump);
1229	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1230	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
1231	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1232	ump->softdep_curdeps[item->wk_type] -= 1;
1233	ump->softdep_curdeps[newtype] += 1;
1234	KASSERT(dep_current[item->wk_type] > 0,
1235	    ("workitem_reassign: %s: dep_current[%s] going negative",
1236	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1237	ACQUIRE_GBLLOCK(&lk);
1238	dep_current[newtype]++;
1239	dep_current[item->wk_type]--;
1240	if (dep_current[newtype] > dep_highuse[newtype])
1241		dep_highuse[newtype] = dep_current[newtype];
1242	dep_total[newtype]++;
1243	FREE_GBLLOCK(&lk);
1244	item->wk_type = newtype;
1245}
1246
1247/*
1248 * Workitem queue management
1249 */
1250static int max_softdeps;	/* maximum number of structs before slowdown */
1251static int tickdelay = 2;	/* number of ticks to pause during slowdown */
1252static int proc_waiting;	/* tracks whether we have a timeout posted */
1253static int *stat_countp;	/* statistic to count in proc_waiting timeout */
1254static struct callout softdep_callout;
1255static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
1256static int req_clear_remove;	/* syncer process flush some freeblks */
1257static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
1258
1259/*
1260 * runtime statistics
1261 */
1262static int stat_flush_threads;	/* number of softdep flushing threads */
1263static int stat_worklist_push;	/* number of worklist cleanups */
1264static int stat_blk_limit_push;	/* number of times block limit neared */
1265static int stat_ino_limit_push;	/* number of times inode limit neared */
1266static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
1267static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
1268static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
1269static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
1270static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
1271static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
1272static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
1273static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
1274static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
1275static int stat_journal_min;	/* Times hit journal min threshold */
1276static int stat_journal_low;	/* Times hit journal low threshold */
1277static int stat_journal_wait;	/* Times blocked in jwait(). */
1278static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
1279static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
1280static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
1281static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
1282static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
1283static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
1284static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
1285static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
1286static int stat_cleanup_failures; /* Number of cleanup requests that failed */
1287static int stat_emptyjblocks; /* Number of potentially empty journal blocks */
1288
1289SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
1290    &max_softdeps, 0, "");
1291SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
1292    &tickdelay, 0, "");
1293SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
1294    &stat_flush_threads, 0, "");
1295SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
1296    &stat_worklist_push, 0,"");
1297SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
1298    &stat_blk_limit_push, 0,"");
1299SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
1300    &stat_ino_limit_push, 0,"");
1301SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
1302    &stat_blk_limit_hit, 0, "");
1303SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
1304    &stat_ino_limit_hit, 0, "");
1305SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
1306    &stat_sync_limit_hit, 0, "");
1307SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
1308    &stat_indir_blk_ptrs, 0, "");
1309SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
1310    &stat_inode_bitmap, 0, "");
1311SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
1312    &stat_direct_blk_ptrs, 0, "");
1313SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
1314    &stat_dir_entry, 0, "");
1315SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
1316    &stat_jaddref, 0, "");
1317SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
1318    &stat_jnewblk, 0, "");
1319SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
1320    &stat_journal_low, 0, "");
1321SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
1322    &stat_journal_min, 0, "");
1323SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
1324    &stat_journal_wait, 0, "");
1325SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
1326    &stat_jwait_filepage, 0, "");
1327SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
1328    &stat_jwait_freeblks, 0, "");
1329SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
1330    &stat_jwait_inode, 0, "");
1331SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
1332    &stat_jwait_newblk, 0, "");
1333SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
1334    &stat_cleanup_blkrequests, 0, "");
1335SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
1336    &stat_cleanup_inorequests, 0, "");
1337SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
1338    &stat_cleanup_high_delay, 0, "");
1339SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
1340    &stat_cleanup_retries, 0, "");
1341SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
1342    &stat_cleanup_failures, 0, "");
1343SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
1344    &softdep_flushcache, 0, "");
1345SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
1346    &stat_emptyjblocks, 0, "");
1347
1348SYSCTL_DECL(_vfs_ffs);
1349
1350/* Whether to recompute the summary at mount time */
1351static int compute_summary_at_mount = 0;
1352SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
1353	   &compute_summary_at_mount, 0, "Recompute summary at mount");
1354static int print_threads = 0;
1355SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
1356    &print_threads, 0, "Notify flusher thread start/stop");
1357
1358/* List of all filesystems mounted with soft updates */
1359static TAILQ_HEAD(, mount_softdeps) softdepmounts;
1360
1361/*
1362 * This function cleans the worklist for a filesystem.
1363 * Each filesystem running with soft dependencies gets its own
1364 * thread to run in this function. The thread is started up in
1365 * softdep_mount and shutdown in softdep_unmount. They show up
1366 * as part of the kernel "bufdaemon" process whose process
1367 * entry is available in bufdaemonproc.
1368 */
1369static int searchfailed;
1370extern struct proc *bufdaemonproc;
1371static void
1372softdep_flush(addr)
1373	void *addr;
1374{
1375	struct mount *mp;
1376	struct thread *td;
1377	struct ufsmount *ump;
1378
1379	td = curthread;
1380	td->td_pflags |= TDP_NORUNNINGBUF;
1381	mp = (struct mount *)addr;
1382	ump = VFSTOUFS(mp);
1383	atomic_add_int(&stat_flush_threads, 1);
1384	ACQUIRE_LOCK(ump);
1385	ump->softdep_flags &= ~FLUSH_STARTING;
1386	wakeup(&ump->softdep_flushtd);
1387	FREE_LOCK(ump);
1388	if (print_threads) {
1389		if (stat_flush_threads == 1)
1390			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
1391			    bufdaemonproc->p_pid);
1392		printf("Start thread %s\n", td->td_name);
1393	}
1394	for (;;) {
1395		while (softdep_process_worklist(mp, 0) > 0 ||
1396		    (MOUNTEDSUJ(mp) &&
1397		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
1398			kthread_suspend_check();
1399		ACQUIRE_LOCK(ump);
1400		if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1401			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
1402			    "sdflush", hz / 2);
1403		ump->softdep_flags &= ~FLUSH_CLEANUP;
1404		/*
1405		 * Check to see if we are done and need to exit.
1406		 */
1407		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
1408			FREE_LOCK(ump);
1409			continue;
1410		}
1411		ump->softdep_flags &= ~FLUSH_EXIT;
1412		FREE_LOCK(ump);
1413		wakeup(&ump->softdep_flags);
1414		if (print_threads)
1415			printf("Stop thread %s: searchfailed %d, did cleanups %d\n", td->td_name, searchfailed, ump->um_softdep->sd_cleanups);
1416		atomic_subtract_int(&stat_flush_threads, 1);
1417		kthread_exit();
1418		panic("kthread_exit failed\n");
1419	}
1420}
1421
1422static void
1423worklist_speedup(mp)
1424	struct mount *mp;
1425{
1426	struct ufsmount *ump;
1427
1428	ump = VFSTOUFS(mp);
1429	LOCK_OWNED(ump);
1430	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1431		ump->softdep_flags |= FLUSH_CLEANUP;
1432	wakeup(&ump->softdep_flushtd);
1433}
1434
1435static int
1436softdep_speedup(ump)
1437	struct ufsmount *ump;
1438{
1439	struct ufsmount *altump;
1440	struct mount_softdeps *sdp;
1441
1442	LOCK_OWNED(ump);
1443	worklist_speedup(ump->um_mountp);
1444	bd_speedup();
1445	/*
1446	 * If we have global shortages, then we need other
1447	 * filesystems to help with the cleanup. Here we wakeup a
1448	 * flusher thread for a filesystem that is over its fair
1449	 * share of resources.
1450	 */
1451	if (req_clear_inodedeps || req_clear_remove) {
1452		ACQUIRE_GBLLOCK(&lk);
1453		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
1454			if ((altump = sdp->sd_ump) == ump)
1455				continue;
1456			if (((req_clear_inodedeps &&
1457			    altump->softdep_curdeps[D_INODEDEP] >
1458			    max_softdeps / stat_flush_threads) ||
1459			    (req_clear_remove &&
1460			    altump->softdep_curdeps[D_DIRREM] >
1461			    (max_softdeps / 2) / stat_flush_threads)) &&
1462			    TRY_ACQUIRE_LOCK(altump))
1463				break;
1464		}
1465		if (sdp == NULL) {
1466			searchfailed++;
1467			FREE_GBLLOCK(&lk);
1468		} else {
1469			/*
1470			 * Move to the end of the list so we pick a
1471			 * different one on out next try.
1472			 */
1473			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
1474			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
1475			FREE_GBLLOCK(&lk);
1476			if ((altump->softdep_flags &
1477			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1478				altump->softdep_flags |= FLUSH_CLEANUP;
1479			altump->um_softdep->sd_cleanups++;
1480			wakeup(&altump->softdep_flushtd);
1481			FREE_LOCK(altump);
1482		}
1483	}
1484	return (speedup_syncer());
1485}
1486
1487/*
1488 * Add an item to the end of the work queue.
1489 * This routine requires that the lock be held.
1490 * This is the only routine that adds items to the list.
1491 * The following routine is the only one that removes items
1492 * and does so in order from first to last.
1493 */
1494
1495#define	WK_HEAD		0x0001	/* Add to HEAD. */
1496#define	WK_NODELAY	0x0002	/* Process immediately. */
1497
1498static void
1499add_to_worklist(wk, flags)
1500	struct worklist *wk;
1501	int flags;
1502{
1503	struct ufsmount *ump;
1504
1505	ump = VFSTOUFS(wk->wk_mp);
1506	LOCK_OWNED(ump);
1507	if (wk->wk_state & ONWORKLIST)
1508		panic("add_to_worklist: %s(0x%X) already on list",
1509		    TYPENAME(wk->wk_type), wk->wk_state);
1510	wk->wk_state |= ONWORKLIST;
1511	if (ump->softdep_on_worklist == 0) {
1512		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1513		ump->softdep_worklist_tail = wk;
1514	} else if (flags & WK_HEAD) {
1515		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1516	} else {
1517		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
1518		ump->softdep_worklist_tail = wk;
1519	}
1520	ump->softdep_on_worklist += 1;
1521	if (flags & WK_NODELAY)
1522		worklist_speedup(wk->wk_mp);
1523}
1524
1525/*
1526 * Remove the item to be processed. If we are removing the last
1527 * item on the list, we need to recalculate the tail pointer.
1528 */
1529static void
1530remove_from_worklist(wk)
1531	struct worklist *wk;
1532{
1533	struct ufsmount *ump;
1534
1535	ump = VFSTOUFS(wk->wk_mp);
1536	WORKLIST_REMOVE(wk);
1537	if (ump->softdep_worklist_tail == wk)
1538		ump->softdep_worklist_tail =
1539		    (struct worklist *)wk->wk_list.le_prev;
1540	ump->softdep_on_worklist -= 1;
1541}
1542
1543static void
1544wake_worklist(wk)
1545	struct worklist *wk;
1546{
1547	if (wk->wk_state & IOWAITING) {
1548		wk->wk_state &= ~IOWAITING;
1549		wakeup(wk);
1550	}
1551}
1552
1553static void
1554wait_worklist(wk, wmesg)
1555	struct worklist *wk;
1556	char *wmesg;
1557{
1558	struct ufsmount *ump;
1559
1560	ump = VFSTOUFS(wk->wk_mp);
1561	wk->wk_state |= IOWAITING;
1562	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
1563}
1564
1565/*
1566 * Process that runs once per second to handle items in the background queue.
1567 *
1568 * Note that we ensure that everything is done in the order in which they
1569 * appear in the queue. The code below depends on this property to ensure
1570 * that blocks of a file are freed before the inode itself is freed. This
1571 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
1572 * until all the old ones have been purged from the dependency lists.
1573 */
1574static int
1575softdep_process_worklist(mp, full)
1576	struct mount *mp;
1577	int full;
1578{
1579	int cnt, matchcnt;
1580	struct ufsmount *ump;
1581	long starttime;
1582
1583	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
1584	if (MOUNTEDSOFTDEP(mp) == 0)
1585		return (0);
1586	matchcnt = 0;
1587	ump = VFSTOUFS(mp);
1588	ACQUIRE_LOCK(ump);
1589	starttime = time_second;
1590	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
1591	check_clear_deps(mp);
1592	while (ump->softdep_on_worklist > 0) {
1593		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
1594			break;
1595		else
1596			matchcnt += cnt;
1597		check_clear_deps(mp);
1598		/*
1599		 * We do not generally want to stop for buffer space, but if
1600		 * we are really being a buffer hog, we will stop and wait.
1601		 */
1602		if (should_yield()) {
1603			FREE_LOCK(ump);
1604			kern_yield(PRI_USER);
1605			bwillwrite();
1606			ACQUIRE_LOCK(ump);
1607		}
1608		/*
1609		 * Never allow processing to run for more than one
1610		 * second. This gives the syncer thread the opportunity
1611		 * to pause if appropriate.
1612		 */
1613		if (!full && starttime != time_second)
1614			break;
1615	}
1616	if (full == 0)
1617		journal_unsuspend(ump);
1618	FREE_LOCK(ump);
1619	return (matchcnt);
1620}
1621
1622/*
1623 * Process all removes associated with a vnode if we are running out of
1624 * journal space.  Any other process which attempts to flush these will
1625 * be unable as we have the vnodes locked.
1626 */
1627static void
1628process_removes(vp)
1629	struct vnode *vp;
1630{
1631	struct inodedep *inodedep;
1632	struct dirrem *dirrem;
1633	struct ufsmount *ump;
1634	struct mount *mp;
1635	ino_t inum;
1636
1637	mp = vp->v_mount;
1638	ump = VFSTOUFS(mp);
1639	LOCK_OWNED(ump);
1640	inum = VTOI(vp)->i_number;
1641	for (;;) {
1642top:
1643		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1644			return;
1645		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
1646			/*
1647			 * If another thread is trying to lock this vnode
1648			 * it will fail but we must wait for it to do so
1649			 * before we can proceed.
1650			 */
1651			if (dirrem->dm_state & INPROGRESS) {
1652				wait_worklist(&dirrem->dm_list, "pwrwait");
1653				goto top;
1654			}
1655			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
1656			    (COMPLETE | ONWORKLIST))
1657				break;
1658		}
1659		if (dirrem == NULL)
1660			return;
1661		remove_from_worklist(&dirrem->dm_list);
1662		FREE_LOCK(ump);
1663		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1664			panic("process_removes: suspended filesystem");
1665		handle_workitem_remove(dirrem, 0);
1666		vn_finished_secondary_write(mp);
1667		ACQUIRE_LOCK(ump);
1668	}
1669}
1670
1671/*
1672 * Process all truncations associated with a vnode if we are running out
1673 * of journal space.  This is called when the vnode lock is already held
1674 * and no other process can clear the truncation.  This function returns
1675 * a value greater than zero if it did any work.
1676 */
1677static void
1678process_truncates(vp)
1679	struct vnode *vp;
1680{
1681	struct inodedep *inodedep;
1682	struct freeblks *freeblks;
1683	struct ufsmount *ump;
1684	struct mount *mp;
1685	ino_t inum;
1686	int cgwait;
1687
1688	mp = vp->v_mount;
1689	ump = VFSTOUFS(mp);
1690	LOCK_OWNED(ump);
1691	inum = VTOI(vp)->i_number;
1692	for (;;) {
1693		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1694			return;
1695		cgwait = 0;
1696		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
1697			/* Journal entries not yet written.  */
1698			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
1699				jwait(&LIST_FIRST(
1700				    &freeblks->fb_jblkdephd)->jb_list,
1701				    MNT_WAIT);
1702				break;
1703			}
1704			/* Another thread is executing this item. */
1705			if (freeblks->fb_state & INPROGRESS) {
1706				wait_worklist(&freeblks->fb_list, "ptrwait");
1707				break;
1708			}
1709			/* Freeblks is waiting on a inode write. */
1710			if ((freeblks->fb_state & COMPLETE) == 0) {
1711				FREE_LOCK(ump);
1712				ffs_update(vp, 1);
1713				ACQUIRE_LOCK(ump);
1714				break;
1715			}
1716			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
1717			    (ALLCOMPLETE | ONWORKLIST)) {
1718				remove_from_worklist(&freeblks->fb_list);
1719				freeblks->fb_state |= INPROGRESS;
1720				FREE_LOCK(ump);
1721				if (vn_start_secondary_write(NULL, &mp,
1722				    V_NOWAIT))
1723					panic("process_truncates: "
1724					    "suspended filesystem");
1725				handle_workitem_freeblocks(freeblks, 0);
1726				vn_finished_secondary_write(mp);
1727				ACQUIRE_LOCK(ump);
1728				break;
1729			}
1730			if (freeblks->fb_cgwait)
1731				cgwait++;
1732		}
1733		if (cgwait) {
1734			FREE_LOCK(ump);
1735			sync_cgs(mp, MNT_WAIT);
1736			ffs_sync_snap(mp, MNT_WAIT);
1737			ACQUIRE_LOCK(ump);
1738			continue;
1739		}
1740		if (freeblks == NULL)
1741			break;
1742	}
1743	return;
1744}
1745
1746/*
1747 * Process one item on the worklist.
1748 */
1749static int
1750process_worklist_item(mp, target, flags)
1751	struct mount *mp;
1752	int target;
1753	int flags;
1754{
1755	struct worklist sentinel;
1756	struct worklist *wk;
1757	struct ufsmount *ump;
1758	int matchcnt;
1759	int error;
1760
1761	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
1762	/*
1763	 * If we are being called because of a process doing a
1764	 * copy-on-write, then it is not safe to write as we may
1765	 * recurse into the copy-on-write routine.
1766	 */
1767	if (curthread->td_pflags & TDP_COWINPROGRESS)
1768		return (-1);
1769	PHOLD(curproc);	/* Don't let the stack go away. */
1770	ump = VFSTOUFS(mp);
1771	LOCK_OWNED(ump);
1772	matchcnt = 0;
1773	sentinel.wk_mp = NULL;
1774	sentinel.wk_type = D_SENTINEL;
1775	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
1776	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
1777	    wk = LIST_NEXT(&sentinel, wk_list)) {
1778		if (wk->wk_type == D_SENTINEL) {
1779			LIST_REMOVE(&sentinel, wk_list);
1780			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
1781			continue;
1782		}
1783		if (wk->wk_state & INPROGRESS)
1784			panic("process_worklist_item: %p already in progress.",
1785			    wk);
1786		wk->wk_state |= INPROGRESS;
1787		remove_from_worklist(wk);
1788		FREE_LOCK(ump);
1789		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1790			panic("process_worklist_item: suspended filesystem");
1791		switch (wk->wk_type) {
1792		case D_DIRREM:
1793			/* removal of a directory entry */
1794			error = handle_workitem_remove(WK_DIRREM(wk), flags);
1795			break;
1796
1797		case D_FREEBLKS:
1798			/* releasing blocks and/or fragments from a file */
1799			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
1800			    flags);
1801			break;
1802
1803		case D_FREEFRAG:
1804			/* releasing a fragment when replaced as a file grows */
1805			handle_workitem_freefrag(WK_FREEFRAG(wk));
1806			error = 0;
1807			break;
1808
1809		case D_FREEFILE:
1810			/* releasing an inode when its link count drops to 0 */
1811			handle_workitem_freefile(WK_FREEFILE(wk));
1812			error = 0;
1813			break;
1814
1815		default:
1816			panic("%s_process_worklist: Unknown type %s",
1817			    "softdep", TYPENAME(wk->wk_type));
1818			/* NOTREACHED */
1819		}
1820		vn_finished_secondary_write(mp);
1821		ACQUIRE_LOCK(ump);
1822		if (error == 0) {
1823			if (++matchcnt == target)
1824				break;
1825			continue;
1826		}
1827		/*
1828		 * We have to retry the worklist item later.  Wake up any
1829		 * waiters who may be able to complete it immediately and
1830		 * add the item back to the head so we don't try to execute
1831		 * it again.
1832		 */
1833		wk->wk_state &= ~INPROGRESS;
1834		wake_worklist(wk);
1835		add_to_worklist(wk, WK_HEAD);
1836	}
1837	LIST_REMOVE(&sentinel, wk_list);
1838	/* Sentinal could've become the tail from remove_from_worklist. */
1839	if (ump->softdep_worklist_tail == &sentinel)
1840		ump->softdep_worklist_tail =
1841		    (struct worklist *)sentinel.wk_list.le_prev;
1842	PRELE(curproc);
1843	return (matchcnt);
1844}
1845
1846/*
1847 * Move dependencies from one buffer to another.
1848 */
1849int
1850softdep_move_dependencies(oldbp, newbp)
1851	struct buf *oldbp;
1852	struct buf *newbp;
1853{
1854	struct worklist *wk, *wktail;
1855	struct ufsmount *ump;
1856	int dirty;
1857
1858	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
1859		return (0);
1860	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
1861	    ("softdep_move_dependencies called on non-softdep filesystem"));
1862	dirty = 0;
1863	wktail = NULL;
1864	ump = VFSTOUFS(wk->wk_mp);
1865	ACQUIRE_LOCK(ump);
1866	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
1867		LIST_REMOVE(wk, wk_list);
1868		if (wk->wk_type == D_BMSAFEMAP &&
1869		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
1870			dirty = 1;
1871		if (wktail == NULL)
1872			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
1873		else
1874			LIST_INSERT_AFTER(wktail, wk, wk_list);
1875		wktail = wk;
1876	}
1877	FREE_LOCK(ump);
1878
1879	return (dirty);
1880}
1881
1882/*
1883 * Purge the work list of all items associated with a particular mount point.
1884 */
1885int
1886softdep_flushworklist(oldmnt, countp, td)
1887	struct mount *oldmnt;
1888	int *countp;
1889	struct thread *td;
1890{
1891	struct vnode *devvp;
1892	struct ufsmount *ump;
1893	int count, error;
1894
1895	/*
1896	 * Alternately flush the block device associated with the mount
1897	 * point and process any dependencies that the flushing
1898	 * creates. We continue until no more worklist dependencies
1899	 * are found.
1900	 */
1901	*countp = 0;
1902	error = 0;
1903	ump = VFSTOUFS(oldmnt);
1904	devvp = ump->um_devvp;
1905	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
1906		*countp += count;
1907		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1908		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1909		VOP_UNLOCK(devvp, 0);
1910		if (error != 0)
1911			break;
1912	}
1913	return (error);
1914}
1915
1916#define	SU_WAITIDLE_RETRIES	20
1917static int
1918softdep_waitidle(struct mount *mp, int flags __unused)
1919{
1920	struct ufsmount *ump;
1921	struct vnode *devvp;
1922	struct thread *td;
1923	int error, i;
1924
1925	ump = VFSTOUFS(mp);
1926	devvp = ump->um_devvp;
1927	td = curthread;
1928	error = 0;
1929	ACQUIRE_LOCK(ump);
1930	for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) {
1931		ump->softdep_req = 1;
1932		KASSERT((flags & FORCECLOSE) == 0 ||
1933		    ump->softdep_on_worklist == 0,
1934		    ("softdep_waitidle: work added after flush"));
1935		msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP,
1936		    "softdeps", 10 * hz);
1937		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1938		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1939		VOP_UNLOCK(devvp, 0);
1940		ACQUIRE_LOCK(ump);
1941		if (error != 0)
1942			break;
1943	}
1944	ump->softdep_req = 0;
1945	if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) {
1946		error = EBUSY;
1947		printf("softdep_waitidle: Failed to flush worklist for %p\n",
1948		    mp);
1949	}
1950	FREE_LOCK(ump);
1951	return (error);
1952}
1953
1954/*
1955 * Flush all vnodes and worklist items associated with a specified mount point.
1956 */
1957int
1958softdep_flushfiles(oldmnt, flags, td)
1959	struct mount *oldmnt;
1960	int flags;
1961	struct thread *td;
1962{
1963#ifdef QUOTA
1964	struct ufsmount *ump;
1965	int i;
1966#endif
1967	int error, early, depcount, loopcnt, retry_flush_count, retry;
1968	int morework;
1969
1970	KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0,
1971	    ("softdep_flushfiles called on non-softdep filesystem"));
1972	loopcnt = 10;
1973	retry_flush_count = 3;
1974retry_flush:
1975	error = 0;
1976
1977	/*
1978	 * Alternately flush the vnodes associated with the mount
1979	 * point and process any dependencies that the flushing
1980	 * creates. In theory, this loop can happen at most twice,
1981	 * but we give it a few extra just to be sure.
1982	 */
1983	for (; loopcnt > 0; loopcnt--) {
1984		/*
1985		 * Do another flush in case any vnodes were brought in
1986		 * as part of the cleanup operations.
1987		 */
1988		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
1989		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
1990		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
1991			break;
1992		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
1993		    depcount == 0)
1994			break;
1995	}
1996	/*
1997	 * If we are unmounting then it is an error to fail. If we
1998	 * are simply trying to downgrade to read-only, then filesystem
1999	 * activity can keep us busy forever, so we just fail with EBUSY.
2000	 */
2001	if (loopcnt == 0) {
2002		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
2003			panic("softdep_flushfiles: looping");
2004		error = EBUSY;
2005	}
2006	if (!error)
2007		error = softdep_waitidle(oldmnt, flags);
2008	if (!error) {
2009		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
2010			retry = 0;
2011			MNT_ILOCK(oldmnt);
2012			KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0,
2013			    ("softdep_flushfiles: !MNTK_NOINSMNTQ"));
2014			morework = oldmnt->mnt_nvnodelistsize > 0;
2015#ifdef QUOTA
2016			ump = VFSTOUFS(oldmnt);
2017			UFS_LOCK(ump);
2018			for (i = 0; i < MAXQUOTAS; i++) {
2019				if (ump->um_quotas[i] != NULLVP)
2020					morework = 1;
2021			}
2022			UFS_UNLOCK(ump);
2023#endif
2024			if (morework) {
2025				if (--retry_flush_count > 0) {
2026					retry = 1;
2027					loopcnt = 3;
2028				} else
2029					error = EBUSY;
2030			}
2031			MNT_IUNLOCK(oldmnt);
2032			if (retry)
2033				goto retry_flush;
2034		}
2035	}
2036	return (error);
2037}
2038
2039/*
2040 * Structure hashing.
2041 *
2042 * There are four types of structures that can be looked up:
2043 *	1) pagedep structures identified by mount point, inode number,
2044 *	   and logical block.
2045 *	2) inodedep structures identified by mount point and inode number.
2046 *	3) newblk structures identified by mount point and
2047 *	   physical block number.
2048 *	4) bmsafemap structures identified by mount point and
2049 *	   cylinder group number.
2050 *
2051 * The "pagedep" and "inodedep" dependency structures are hashed
2052 * separately from the file blocks and inodes to which they correspond.
2053 * This separation helps when the in-memory copy of an inode or
2054 * file block must be replaced. It also obviates the need to access
2055 * an inode or file page when simply updating (or de-allocating)
2056 * dependency structures. Lookup of newblk structures is needed to
2057 * find newly allocated blocks when trying to associate them with
2058 * their allocdirect or allocindir structure.
2059 *
2060 * The lookup routines optionally create and hash a new instance when
2061 * an existing entry is not found. The bmsafemap lookup routine always
2062 * allocates a new structure if an existing one is not found.
2063 */
2064#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
2065
2066/*
2067 * Structures and routines associated with pagedep caching.
2068 */
2069#define	PAGEDEP_HASH(ump, inum, lbn) \
2070	(&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size])
2071
2072static int
2073pagedep_find(pagedephd, ino, lbn, pagedeppp)
2074	struct pagedep_hashhead *pagedephd;
2075	ino_t ino;
2076	ufs_lbn_t lbn;
2077	struct pagedep **pagedeppp;
2078{
2079	struct pagedep *pagedep;
2080
2081	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2082		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) {
2083			*pagedeppp = pagedep;
2084			return (1);
2085		}
2086	}
2087	*pagedeppp = NULL;
2088	return (0);
2089}
2090/*
2091 * Look up a pagedep. Return 1 if found, 0 otherwise.
2092 * If not found, allocate if DEPALLOC flag is passed.
2093 * Found or allocated entry is returned in pagedeppp.
2094 * This routine must be called with splbio interrupts blocked.
2095 */
2096static int
2097pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp)
2098	struct mount *mp;
2099	struct buf *bp;
2100	ino_t ino;
2101	ufs_lbn_t lbn;
2102	int flags;
2103	struct pagedep **pagedeppp;
2104{
2105	struct pagedep *pagedep;
2106	struct pagedep_hashhead *pagedephd;
2107	struct worklist *wk;
2108	struct ufsmount *ump;
2109	int ret;
2110	int i;
2111
2112	ump = VFSTOUFS(mp);
2113	LOCK_OWNED(ump);
2114	if (bp) {
2115		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2116			if (wk->wk_type == D_PAGEDEP) {
2117				*pagedeppp = WK_PAGEDEP(wk);
2118				return (1);
2119			}
2120		}
2121	}
2122	pagedephd = PAGEDEP_HASH(ump, ino, lbn);
2123	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2124	if (ret) {
2125		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2126			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2127		return (1);
2128	}
2129	if ((flags & DEPALLOC) == 0)
2130		return (0);
2131	FREE_LOCK(ump);
2132	pagedep = malloc(sizeof(struct pagedep),
2133	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2134	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2135	ACQUIRE_LOCK(ump);
2136	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2137	if (*pagedeppp) {
2138		/*
2139		 * This should never happen since we only create pagedeps
2140		 * with the vnode lock held.  Could be an assert.
2141		 */
2142		WORKITEM_FREE(pagedep, D_PAGEDEP);
2143		return (ret);
2144	}
2145	pagedep->pd_ino = ino;
2146	pagedep->pd_lbn = lbn;
2147	LIST_INIT(&pagedep->pd_dirremhd);
2148	LIST_INIT(&pagedep->pd_pendinghd);
2149	for (i = 0; i < DAHASHSZ; i++)
2150		LIST_INIT(&pagedep->pd_diraddhd[i]);
2151	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2152	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2153	*pagedeppp = pagedep;
2154	return (0);
2155}
2156
2157/*
2158 * Structures and routines associated with inodedep caching.
2159 */
2160#define	INODEDEP_HASH(ump, inum) \
2161      (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size])
2162
2163static int
2164inodedep_find(inodedephd, inum, inodedeppp)
2165	struct inodedep_hashhead *inodedephd;
2166	ino_t inum;
2167	struct inodedep **inodedeppp;
2168{
2169	struct inodedep *inodedep;
2170
2171	LIST_FOREACH(inodedep, inodedephd, id_hash)
2172		if (inum == inodedep->id_ino)
2173			break;
2174	if (inodedep) {
2175		*inodedeppp = inodedep;
2176		return (1);
2177	}
2178	*inodedeppp = NULL;
2179
2180	return (0);
2181}
2182/*
2183 * Look up an inodedep. Return 1 if found, 0 if not found.
2184 * If not found, allocate if DEPALLOC flag is passed.
2185 * Found or allocated entry is returned in inodedeppp.
2186 * This routine must be called with splbio interrupts blocked.
2187 */
2188static int
2189inodedep_lookup(mp, inum, flags, inodedeppp)
2190	struct mount *mp;
2191	ino_t inum;
2192	int flags;
2193	struct inodedep **inodedeppp;
2194{
2195	struct inodedep *inodedep;
2196	struct inodedep_hashhead *inodedephd;
2197	struct ufsmount *ump;
2198	struct fs *fs;
2199
2200	ump = VFSTOUFS(mp);
2201	LOCK_OWNED(ump);
2202	fs = ump->um_fs;
2203	inodedephd = INODEDEP_HASH(ump, inum);
2204
2205	if (inodedep_find(inodedephd, inum, inodedeppp))
2206		return (1);
2207	if ((flags & DEPALLOC) == 0)
2208		return (0);
2209	/*
2210	 * If the system is over its limit and our filesystem is
2211	 * responsible for more than our share of that usage and
2212	 * we are not in a rush, request some inodedep cleanup.
2213	 */
2214	if (softdep_excess_items(ump, D_INODEDEP))
2215		schedule_cleanup(mp);
2216	else
2217		FREE_LOCK(ump);
2218	inodedep = malloc(sizeof(struct inodedep),
2219		M_INODEDEP, M_SOFTDEP_FLAGS);
2220	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2221	ACQUIRE_LOCK(ump);
2222	if (inodedep_find(inodedephd, inum, inodedeppp)) {
2223		WORKITEM_FREE(inodedep, D_INODEDEP);
2224		return (1);
2225	}
2226	inodedep->id_fs = fs;
2227	inodedep->id_ino = inum;
2228	inodedep->id_state = ALLCOMPLETE;
2229	inodedep->id_nlinkdelta = 0;
2230	inodedep->id_savedino1 = NULL;
2231	inodedep->id_savedsize = -1;
2232	inodedep->id_savedextsize = -1;
2233	inodedep->id_savednlink = -1;
2234	inodedep->id_bmsafemap = NULL;
2235	inodedep->id_mkdiradd = NULL;
2236	LIST_INIT(&inodedep->id_dirremhd);
2237	LIST_INIT(&inodedep->id_pendinghd);
2238	LIST_INIT(&inodedep->id_inowait);
2239	LIST_INIT(&inodedep->id_bufwait);
2240	TAILQ_INIT(&inodedep->id_inoreflst);
2241	TAILQ_INIT(&inodedep->id_inoupdt);
2242	TAILQ_INIT(&inodedep->id_newinoupdt);
2243	TAILQ_INIT(&inodedep->id_extupdt);
2244	TAILQ_INIT(&inodedep->id_newextupdt);
2245	TAILQ_INIT(&inodedep->id_freeblklst);
2246	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2247	*inodedeppp = inodedep;
2248	return (0);
2249}
2250
2251/*
2252 * Structures and routines associated with newblk caching.
2253 */
2254#define	NEWBLK_HASH(ump, inum) \
2255	(&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size])
2256
2257static int
2258newblk_find(newblkhd, newblkno, flags, newblkpp)
2259	struct newblk_hashhead *newblkhd;
2260	ufs2_daddr_t newblkno;
2261	int flags;
2262	struct newblk **newblkpp;
2263{
2264	struct newblk *newblk;
2265
2266	LIST_FOREACH(newblk, newblkhd, nb_hash) {
2267		if (newblkno != newblk->nb_newblkno)
2268			continue;
2269		/*
2270		 * If we're creating a new dependency don't match those that
2271		 * have already been converted to allocdirects.  This is for
2272		 * a frag extend.
2273		 */
2274		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2275			continue;
2276		break;
2277	}
2278	if (newblk) {
2279		*newblkpp = newblk;
2280		return (1);
2281	}
2282	*newblkpp = NULL;
2283	return (0);
2284}
2285
2286/*
2287 * Look up a newblk. Return 1 if found, 0 if not found.
2288 * If not found, allocate if DEPALLOC flag is passed.
2289 * Found or allocated entry is returned in newblkpp.
2290 */
2291static int
2292newblk_lookup(mp, newblkno, flags, newblkpp)
2293	struct mount *mp;
2294	ufs2_daddr_t newblkno;
2295	int flags;
2296	struct newblk **newblkpp;
2297{
2298	struct newblk *newblk;
2299	struct newblk_hashhead *newblkhd;
2300	struct ufsmount *ump;
2301
2302	ump = VFSTOUFS(mp);
2303	LOCK_OWNED(ump);
2304	newblkhd = NEWBLK_HASH(ump, newblkno);
2305	if (newblk_find(newblkhd, newblkno, flags, newblkpp))
2306		return (1);
2307	if ((flags & DEPALLOC) == 0)
2308		return (0);
2309	if (softdep_excess_items(ump, D_NEWBLK) ||
2310	    softdep_excess_items(ump, D_ALLOCDIRECT) ||
2311	    softdep_excess_items(ump, D_ALLOCINDIR))
2312		schedule_cleanup(mp);
2313	else
2314		FREE_LOCK(ump);
2315	newblk = malloc(sizeof(union allblk), M_NEWBLK,
2316	    M_SOFTDEP_FLAGS | M_ZERO);
2317	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2318	ACQUIRE_LOCK(ump);
2319	if (newblk_find(newblkhd, newblkno, flags, newblkpp)) {
2320		WORKITEM_FREE(newblk, D_NEWBLK);
2321		return (1);
2322	}
2323	newblk->nb_freefrag = NULL;
2324	LIST_INIT(&newblk->nb_indirdeps);
2325	LIST_INIT(&newblk->nb_newdirblk);
2326	LIST_INIT(&newblk->nb_jwork);
2327	newblk->nb_state = ATTACHED;
2328	newblk->nb_newblkno = newblkno;
2329	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2330	*newblkpp = newblk;
2331	return (0);
2332}
2333
2334/*
2335 * Structures and routines associated with freed indirect block caching.
2336 */
2337#define	INDIR_HASH(ump, blkno) \
2338	(&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size])
2339
2340/*
2341 * Lookup an indirect block in the indir hash table.  The freework is
2342 * removed and potentially freed.  The caller must do a blocking journal
2343 * write before writing to the blkno.
2344 */
2345static int
2346indirblk_lookup(mp, blkno)
2347	struct mount *mp;
2348	ufs2_daddr_t blkno;
2349{
2350	struct freework *freework;
2351	struct indir_hashhead *wkhd;
2352	struct ufsmount *ump;
2353
2354	ump = VFSTOUFS(mp);
2355	wkhd = INDIR_HASH(ump, blkno);
2356	TAILQ_FOREACH(freework, wkhd, fw_next) {
2357		if (freework->fw_blkno != blkno)
2358			continue;
2359		indirblk_remove(freework);
2360		return (1);
2361	}
2362	return (0);
2363}
2364
2365/*
2366 * Insert an indirect block represented by freework into the indirblk
2367 * hash table so that it may prevent the block from being re-used prior
2368 * to the journal being written.
2369 */
2370static void
2371indirblk_insert(freework)
2372	struct freework *freework;
2373{
2374	struct jblocks *jblocks;
2375	struct jseg *jseg;
2376	struct ufsmount *ump;
2377
2378	ump = VFSTOUFS(freework->fw_list.wk_mp);
2379	jblocks = ump->softdep_jblocks;
2380	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2381	if (jseg == NULL)
2382		return;
2383
2384	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2385	TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework,
2386	    fw_next);
2387	freework->fw_state &= ~DEPCOMPLETE;
2388}
2389
2390static void
2391indirblk_remove(freework)
2392	struct freework *freework;
2393{
2394	struct ufsmount *ump;
2395
2396	ump = VFSTOUFS(freework->fw_list.wk_mp);
2397	LIST_REMOVE(freework, fw_segs);
2398	TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next);
2399	freework->fw_state |= DEPCOMPLETE;
2400	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2401		WORKITEM_FREE(freework, D_FREEWORK);
2402}
2403
2404/*
2405 * Executed during filesystem system initialization before
2406 * mounting any filesystems.
2407 */
2408void
2409softdep_initialize()
2410{
2411
2412	TAILQ_INIT(&softdepmounts);
2413#ifdef __LP64__
2414	max_softdeps = desiredvnodes * 4;
2415#else
2416	max_softdeps = desiredvnodes * 2;
2417#endif
2418
2419	/* initialise bioops hack */
2420	bioops.io_start = softdep_disk_io_initiation;
2421	bioops.io_complete = softdep_disk_write_complete;
2422	bioops.io_deallocate = softdep_deallocate_dependencies;
2423	bioops.io_countdeps = softdep_count_dependencies;
2424	softdep_ast_cleanup = softdep_ast_cleanup_proc;
2425
2426	/* Initialize the callout with an mtx. */
2427	callout_init_mtx(&softdep_callout, &lk, 0);
2428}
2429
2430/*
2431 * Executed after all filesystems have been unmounted during
2432 * filesystem module unload.
2433 */
2434void
2435softdep_uninitialize()
2436{
2437
2438	/* clear bioops hack */
2439	bioops.io_start = NULL;
2440	bioops.io_complete = NULL;
2441	bioops.io_deallocate = NULL;
2442	bioops.io_countdeps = NULL;
2443	softdep_ast_cleanup = NULL;
2444
2445	callout_drain(&softdep_callout);
2446}
2447
2448/*
2449 * Called at mount time to notify the dependency code that a
2450 * filesystem wishes to use it.
2451 */
2452int
2453softdep_mount(devvp, mp, fs, cred)
2454	struct vnode *devvp;
2455	struct mount *mp;
2456	struct fs *fs;
2457	struct ucred *cred;
2458{
2459	struct csum_total cstotal;
2460	struct mount_softdeps *sdp;
2461	struct ufsmount *ump;
2462	struct cg *cgp;
2463	struct buf *bp;
2464	int i, error, cyl;
2465
2466	sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA,
2467	    M_WAITOK | M_ZERO);
2468	MNT_ILOCK(mp);
2469	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2470	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2471		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2472			MNTK_SOFTDEP | MNTK_NOASYNC;
2473	}
2474	ump = VFSTOUFS(mp);
2475	ump->um_softdep = sdp;
2476	MNT_IUNLOCK(mp);
2477	rw_init(LOCK_PTR(ump), "Per-Filesystem Softdep Lock");
2478	sdp->sd_ump = ump;
2479	LIST_INIT(&ump->softdep_workitem_pending);
2480	LIST_INIT(&ump->softdep_journal_pending);
2481	TAILQ_INIT(&ump->softdep_unlinked);
2482	LIST_INIT(&ump->softdep_dirtycg);
2483	ump->softdep_worklist_tail = NULL;
2484	ump->softdep_on_worklist = 0;
2485	ump->softdep_deps = 0;
2486	LIST_INIT(&ump->softdep_mkdirlisthd);
2487	ump->pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
2488	    &ump->pagedep_hash_size);
2489	ump->pagedep_nextclean = 0;
2490	ump->inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP,
2491	    &ump->inodedep_hash_size);
2492	ump->inodedep_nextclean = 0;
2493	ump->newblk_hashtbl = hashinit(max_softdeps / 2,  M_NEWBLK,
2494	    &ump->newblk_hash_size);
2495	ump->bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP,
2496	    &ump->bmsafemap_hash_size);
2497	i = 1 << (ffs(desiredvnodes / 10) - 1);
2498	ump->indir_hashtbl = malloc(i * sizeof(struct indir_hashhead),
2499	    M_FREEWORK, M_WAITOK);
2500	ump->indir_hash_size = i - 1;
2501	for (i = 0; i <= ump->indir_hash_size; i++)
2502		TAILQ_INIT(&ump->indir_hashtbl[i]);
2503	ACQUIRE_GBLLOCK(&lk);
2504	TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
2505	FREE_GBLLOCK(&lk);
2506	if ((fs->fs_flags & FS_SUJ) &&
2507	    (error = journal_mount(mp, fs, cred)) != 0) {
2508		printf("Failed to start journal: %d\n", error);
2509		softdep_unmount(mp);
2510		return (error);
2511	}
2512	/*
2513	 * Start our flushing thread in the bufdaemon process.
2514	 */
2515	ACQUIRE_LOCK(ump);
2516	ump->softdep_flags |= FLUSH_STARTING;
2517	FREE_LOCK(ump);
2518	kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc,
2519	    &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker",
2520	    mp->mnt_stat.f_mntonname);
2521	ACQUIRE_LOCK(ump);
2522	while ((ump->softdep_flags & FLUSH_STARTING) != 0) {
2523		msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart",
2524		    hz / 2);
2525	}
2526	FREE_LOCK(ump);
2527	/*
2528	 * When doing soft updates, the counters in the
2529	 * superblock may have gotten out of sync. Recomputation
2530	 * can take a long time and can be deferred for background
2531	 * fsck.  However, the old behavior of scanning the cylinder
2532	 * groups and recalculating them at mount time is available
2533	 * by setting vfs.ffs.compute_summary_at_mount to one.
2534	 */
2535	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2536		return (0);
2537	bzero(&cstotal, sizeof cstotal);
2538	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2539		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2540		    fs->fs_cgsize, cred, &bp)) != 0) {
2541			brelse(bp);
2542			softdep_unmount(mp);
2543			return (error);
2544		}
2545		cgp = (struct cg *)bp->b_data;
2546		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2547		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2548		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2549		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2550		fs->fs_cs(fs, cyl) = cgp->cg_cs;
2551		brelse(bp);
2552	}
2553#ifdef DEBUG
2554	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2555		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2556#endif
2557	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2558	return (0);
2559}
2560
2561void
2562softdep_unmount(mp)
2563	struct mount *mp;
2564{
2565	struct ufsmount *ump;
2566#ifdef INVARIANTS
2567	int i;
2568#endif
2569
2570	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
2571	    ("softdep_unmount called on non-softdep filesystem"));
2572	ump = VFSTOUFS(mp);
2573	MNT_ILOCK(mp);
2574	mp->mnt_flag &= ~MNT_SOFTDEP;
2575	if (MOUNTEDSUJ(mp) == 0) {
2576		MNT_IUNLOCK(mp);
2577	} else {
2578		mp->mnt_flag &= ~MNT_SUJ;
2579		MNT_IUNLOCK(mp);
2580		journal_unmount(ump);
2581	}
2582	/*
2583	 * Shut down our flushing thread. Check for NULL is if
2584	 * softdep_mount errors out before the thread has been created.
2585	 */
2586	if (ump->softdep_flushtd != NULL) {
2587		ACQUIRE_LOCK(ump);
2588		ump->softdep_flags |= FLUSH_EXIT;
2589		wakeup(&ump->softdep_flushtd);
2590		msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM | PDROP,
2591		    "sdwait", 0);
2592		KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0,
2593		    ("Thread shutdown failed"));
2594	}
2595	/*
2596	 * Free up our resources.
2597	 */
2598	ACQUIRE_GBLLOCK(&lk);
2599	TAILQ_REMOVE(&softdepmounts, ump->um_softdep, sd_next);
2600	FREE_GBLLOCK(&lk);
2601	rw_destroy(LOCK_PTR(ump));
2602	hashdestroy(ump->pagedep_hashtbl, M_PAGEDEP, ump->pagedep_hash_size);
2603	hashdestroy(ump->inodedep_hashtbl, M_INODEDEP, ump->inodedep_hash_size);
2604	hashdestroy(ump->newblk_hashtbl, M_NEWBLK, ump->newblk_hash_size);
2605	hashdestroy(ump->bmsafemap_hashtbl, M_BMSAFEMAP,
2606	    ump->bmsafemap_hash_size);
2607	free(ump->indir_hashtbl, M_FREEWORK);
2608#ifdef INVARIANTS
2609	for (i = 0; i <= D_LAST; i++)
2610		KASSERT(ump->softdep_curdeps[i] == 0,
2611		    ("Unmount %s: Dep type %s != 0 (%ld)", ump->um_fs->fs_fsmnt,
2612		    TYPENAME(i), ump->softdep_curdeps[i]));
2613#endif
2614	free(ump->um_softdep, M_MOUNTDATA);
2615}
2616
2617static struct jblocks *
2618jblocks_create(void)
2619{
2620	struct jblocks *jblocks;
2621
2622	jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2623	TAILQ_INIT(&jblocks->jb_segs);
2624	jblocks->jb_avail = 10;
2625	jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2626	    M_JBLOCKS, M_WAITOK | M_ZERO);
2627
2628	return (jblocks);
2629}
2630
2631static ufs2_daddr_t
2632jblocks_alloc(jblocks, bytes, actual)
2633	struct jblocks *jblocks;
2634	int bytes;
2635	int *actual;
2636{
2637	ufs2_daddr_t daddr;
2638	struct jextent *jext;
2639	int freecnt;
2640	int blocks;
2641
2642	blocks = bytes / DEV_BSIZE;
2643	jext = &jblocks->jb_extent[jblocks->jb_head];
2644	freecnt = jext->je_blocks - jblocks->jb_off;
2645	if (freecnt == 0) {
2646		jblocks->jb_off = 0;
2647		if (++jblocks->jb_head > jblocks->jb_used)
2648			jblocks->jb_head = 0;
2649		jext = &jblocks->jb_extent[jblocks->jb_head];
2650		freecnt = jext->je_blocks;
2651	}
2652	if (freecnt > blocks)
2653		freecnt = blocks;
2654	*actual = freecnt * DEV_BSIZE;
2655	daddr = jext->je_daddr + jblocks->jb_off;
2656	jblocks->jb_off += freecnt;
2657	jblocks->jb_free -= freecnt;
2658
2659	return (daddr);
2660}
2661
2662static void
2663jblocks_free(jblocks, mp, bytes)
2664	struct jblocks *jblocks;
2665	struct mount *mp;
2666	int bytes;
2667{
2668
2669	LOCK_OWNED(VFSTOUFS(mp));
2670	jblocks->jb_free += bytes / DEV_BSIZE;
2671	if (jblocks->jb_suspended)
2672		worklist_speedup(mp);
2673	wakeup(jblocks);
2674}
2675
2676static void
2677jblocks_destroy(jblocks)
2678	struct jblocks *jblocks;
2679{
2680
2681	if (jblocks->jb_extent)
2682		free(jblocks->jb_extent, M_JBLOCKS);
2683	free(jblocks, M_JBLOCKS);
2684}
2685
2686static void
2687jblocks_add(jblocks, daddr, blocks)
2688	struct jblocks *jblocks;
2689	ufs2_daddr_t daddr;
2690	int blocks;
2691{
2692	struct jextent *jext;
2693
2694	jblocks->jb_blocks += blocks;
2695	jblocks->jb_free += blocks;
2696	jext = &jblocks->jb_extent[jblocks->jb_used];
2697	/* Adding the first block. */
2698	if (jext->je_daddr == 0) {
2699		jext->je_daddr = daddr;
2700		jext->je_blocks = blocks;
2701		return;
2702	}
2703	/* Extending the last extent. */
2704	if (jext->je_daddr + jext->je_blocks == daddr) {
2705		jext->je_blocks += blocks;
2706		return;
2707	}
2708	/* Adding a new extent. */
2709	if (++jblocks->jb_used == jblocks->jb_avail) {
2710		jblocks->jb_avail *= 2;
2711		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2712		    M_JBLOCKS, M_WAITOK | M_ZERO);
2713		memcpy(jext, jblocks->jb_extent,
2714		    sizeof(struct jextent) * jblocks->jb_used);
2715		free(jblocks->jb_extent, M_JBLOCKS);
2716		jblocks->jb_extent = jext;
2717	}
2718	jext = &jblocks->jb_extent[jblocks->jb_used];
2719	jext->je_daddr = daddr;
2720	jext->je_blocks = blocks;
2721	return;
2722}
2723
2724int
2725softdep_journal_lookup(mp, vpp)
2726	struct mount *mp;
2727	struct vnode **vpp;
2728{
2729	struct componentname cnp;
2730	struct vnode *dvp;
2731	ino_t sujournal;
2732	int error;
2733
2734	error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp);
2735	if (error)
2736		return (error);
2737	bzero(&cnp, sizeof(cnp));
2738	cnp.cn_nameiop = LOOKUP;
2739	cnp.cn_flags = ISLASTCN;
2740	cnp.cn_thread = curthread;
2741	cnp.cn_cred = curthread->td_ucred;
2742	cnp.cn_pnbuf = SUJ_FILE;
2743	cnp.cn_nameptr = SUJ_FILE;
2744	cnp.cn_namelen = strlen(SUJ_FILE);
2745	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2746	vput(dvp);
2747	if (error != 0)
2748		return (error);
2749	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2750	return (error);
2751}
2752
2753/*
2754 * Open and verify the journal file.
2755 */
2756static int
2757journal_mount(mp, fs, cred)
2758	struct mount *mp;
2759	struct fs *fs;
2760	struct ucred *cred;
2761{
2762	struct jblocks *jblocks;
2763	struct ufsmount *ump;
2764	struct vnode *vp;
2765	struct inode *ip;
2766	ufs2_daddr_t blkno;
2767	int bcount;
2768	int error;
2769	int i;
2770
2771	ump = VFSTOUFS(mp);
2772	ump->softdep_journal_tail = NULL;
2773	ump->softdep_on_journal = 0;
2774	ump->softdep_accdeps = 0;
2775	ump->softdep_req = 0;
2776	ump->softdep_jblocks = NULL;
2777	error = softdep_journal_lookup(mp, &vp);
2778	if (error != 0) {
2779		printf("Failed to find journal.  Use tunefs to create one\n");
2780		return (error);
2781	}
2782	ip = VTOI(vp);
2783	if (ip->i_size < SUJ_MIN) {
2784		error = ENOSPC;
2785		goto out;
2786	}
2787	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
2788	jblocks = jblocks_create();
2789	for (i = 0; i < bcount; i++) {
2790		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2791		if (error)
2792			break;
2793		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2794	}
2795	if (error) {
2796		jblocks_destroy(jblocks);
2797		goto out;
2798	}
2799	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
2800	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2801	ump->softdep_jblocks = jblocks;
2802out:
2803	if (error == 0) {
2804		MNT_ILOCK(mp);
2805		mp->mnt_flag |= MNT_SUJ;
2806		mp->mnt_flag &= ~MNT_SOFTDEP;
2807		MNT_IUNLOCK(mp);
2808		/*
2809		 * Only validate the journal contents if the
2810		 * filesystem is clean, otherwise we write the logs
2811		 * but they'll never be used.  If the filesystem was
2812		 * still dirty when we mounted it the journal is
2813		 * invalid and a new journal can only be valid if it
2814		 * starts from a clean mount.
2815		 */
2816		if (fs->fs_clean) {
2817			DIP_SET(ip, i_modrev, fs->fs_mtime);
2818			ip->i_flags |= IN_MODIFIED;
2819			ffs_update(vp, 1);
2820		}
2821	}
2822	vput(vp);
2823	return (error);
2824}
2825
2826static void
2827journal_unmount(ump)
2828	struct ufsmount *ump;
2829{
2830
2831	if (ump->softdep_jblocks)
2832		jblocks_destroy(ump->softdep_jblocks);
2833	ump->softdep_jblocks = NULL;
2834}
2835
2836/*
2837 * Called when a journal record is ready to be written.  Space is allocated
2838 * and the journal entry is created when the journal is flushed to stable
2839 * store.
2840 */
2841static void
2842add_to_journal(wk)
2843	struct worklist *wk;
2844{
2845	struct ufsmount *ump;
2846
2847	ump = VFSTOUFS(wk->wk_mp);
2848	LOCK_OWNED(ump);
2849	if (wk->wk_state & ONWORKLIST)
2850		panic("add_to_journal: %s(0x%X) already on list",
2851		    TYPENAME(wk->wk_type), wk->wk_state);
2852	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2853	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2854		ump->softdep_jblocks->jb_age = ticks;
2855		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2856	} else
2857		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2858	ump->softdep_journal_tail = wk;
2859	ump->softdep_on_journal += 1;
2860}
2861
2862/*
2863 * Remove an arbitrary item for the journal worklist maintain the tail
2864 * pointer.  This happens when a new operation obviates the need to
2865 * journal an old operation.
2866 */
2867static void
2868remove_from_journal(wk)
2869	struct worklist *wk;
2870{
2871	struct ufsmount *ump;
2872
2873	ump = VFSTOUFS(wk->wk_mp);
2874	LOCK_OWNED(ump);
2875#ifdef SUJ_DEBUG
2876	{
2877		struct worklist *wkn;
2878
2879		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2880			if (wkn == wk)
2881				break;
2882		if (wkn == NULL)
2883			panic("remove_from_journal: %p is not in journal", wk);
2884	}
2885#endif
2886	/*
2887	 * We emulate a TAILQ to save space in most structures which do not
2888	 * require TAILQ semantics.  Here we must update the tail position
2889	 * when removing the tail which is not the final entry. This works
2890	 * only if the worklist linkage are at the beginning of the structure.
2891	 */
2892	if (ump->softdep_journal_tail == wk)
2893		ump->softdep_journal_tail =
2894		    (struct worklist *)wk->wk_list.le_prev;
2895
2896	WORKLIST_REMOVE(wk);
2897	ump->softdep_on_journal -= 1;
2898}
2899
2900/*
2901 * Check for journal space as well as dependency limits so the prelink
2902 * code can throttle both journaled and non-journaled filesystems.
2903 * Threshold is 0 for low and 1 for min.
2904 */
2905static int
2906journal_space(ump, thresh)
2907	struct ufsmount *ump;
2908	int thresh;
2909{
2910	struct jblocks *jblocks;
2911	int limit, avail;
2912
2913	jblocks = ump->softdep_jblocks;
2914	if (jblocks == NULL)
2915		return (1);
2916	/*
2917	 * We use a tighter restriction here to prevent request_cleanup()
2918	 * running in threads from running into locks we currently hold.
2919	 * We have to be over the limit and our filesystem has to be
2920	 * responsible for more than our share of that usage.
2921	 */
2922	limit = (max_softdeps / 10) * 9;
2923	if (dep_current[D_INODEDEP] > limit &&
2924	    ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
2925		return (0);
2926	if (thresh)
2927		thresh = jblocks->jb_min;
2928	else
2929		thresh = jblocks->jb_low;
2930	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
2931	avail = jblocks->jb_free - avail;
2932
2933	return (avail > thresh);
2934}
2935
2936static void
2937journal_suspend(ump)
2938	struct ufsmount *ump;
2939{
2940	struct jblocks *jblocks;
2941	struct mount *mp;
2942
2943	mp = UFSTOVFS(ump);
2944	jblocks = ump->softdep_jblocks;
2945	MNT_ILOCK(mp);
2946	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2947		stat_journal_min++;
2948		mp->mnt_kern_flag |= MNTK_SUSPEND;
2949		mp->mnt_susp_owner = ump->softdep_flushtd;
2950	}
2951	jblocks->jb_suspended = 1;
2952	MNT_IUNLOCK(mp);
2953}
2954
2955static int
2956journal_unsuspend(struct ufsmount *ump)
2957{
2958	struct jblocks *jblocks;
2959	struct mount *mp;
2960
2961	mp = UFSTOVFS(ump);
2962	jblocks = ump->softdep_jblocks;
2963
2964	if (jblocks != NULL && jblocks->jb_suspended &&
2965	    journal_space(ump, jblocks->jb_min)) {
2966		jblocks->jb_suspended = 0;
2967		FREE_LOCK(ump);
2968		mp->mnt_susp_owner = curthread;
2969		vfs_write_resume(mp, 0);
2970		ACQUIRE_LOCK(ump);
2971		return (1);
2972	}
2973	return (0);
2974}
2975
2976/*
2977 * Called before any allocation function to be certain that there is
2978 * sufficient space in the journal prior to creating any new records.
2979 * Since in the case of block allocation we may have multiple locked
2980 * buffers at the time of the actual allocation we can not block
2981 * when the journal records are created.  Doing so would create a deadlock
2982 * if any of these buffers needed to be flushed to reclaim space.  Instead
2983 * we require a sufficiently large amount of available space such that
2984 * each thread in the system could have passed this allocation check and
2985 * still have sufficient free space.  With 20% of a minimum journal size
2986 * of 1MB we have 6553 records available.
2987 */
2988int
2989softdep_prealloc(vp, waitok)
2990	struct vnode *vp;
2991	int waitok;
2992{
2993	struct ufsmount *ump;
2994
2995	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
2996	    ("softdep_prealloc called on non-softdep filesystem"));
2997	/*
2998	 * Nothing to do if we are not running journaled soft updates.
2999	 * If we currently hold the snapshot lock, we must avoid handling
3000	 * other resources that could cause deadlock.
3001	 */
3002	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)))
3003		return (0);
3004	ump = VFSTOUFS(vp->v_mount);
3005	ACQUIRE_LOCK(ump);
3006	if (journal_space(ump, 0)) {
3007		FREE_LOCK(ump);
3008		return (0);
3009	}
3010	stat_journal_low++;
3011	FREE_LOCK(ump);
3012	if (waitok == MNT_NOWAIT)
3013		return (ENOSPC);
3014	/*
3015	 * Attempt to sync this vnode once to flush any journal
3016	 * work attached to it.
3017	 */
3018	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
3019		ffs_syncvnode(vp, waitok, 0);
3020	ACQUIRE_LOCK(ump);
3021	process_removes(vp);
3022	process_truncates(vp);
3023	if (journal_space(ump, 0) == 0) {
3024		softdep_speedup(ump);
3025		if (journal_space(ump, 1) == 0)
3026			journal_suspend(ump);
3027	}
3028	FREE_LOCK(ump);
3029
3030	return (0);
3031}
3032
3033/*
3034 * Before adjusting a link count on a vnode verify that we have sufficient
3035 * journal space.  If not, process operations that depend on the currently
3036 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
3037 * and softdep flush threads can not acquire these locks to reclaim space.
3038 */
3039static void
3040softdep_prelink(dvp, vp)
3041	struct vnode *dvp;
3042	struct vnode *vp;
3043{
3044	struct ufsmount *ump;
3045
3046	ump = VFSTOUFS(dvp->v_mount);
3047	LOCK_OWNED(ump);
3048	/*
3049	 * Nothing to do if we have sufficient journal space.
3050	 * If we currently hold the snapshot lock, we must avoid
3051	 * handling other resources that could cause deadlock.
3052	 */
3053	if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
3054		return;
3055	stat_journal_low++;
3056	FREE_LOCK(ump);
3057	if (vp)
3058		ffs_syncvnode(vp, MNT_NOWAIT, 0);
3059	ffs_syncvnode(dvp, MNT_WAIT, 0);
3060	ACQUIRE_LOCK(ump);
3061	/* Process vp before dvp as it may create .. removes. */
3062	if (vp) {
3063		process_removes(vp);
3064		process_truncates(vp);
3065	}
3066	process_removes(dvp);
3067	process_truncates(dvp);
3068	softdep_speedup(ump);
3069	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3070	if (journal_space(ump, 0) == 0) {
3071		softdep_speedup(ump);
3072		if (journal_space(ump, 1) == 0)
3073			journal_suspend(ump);
3074	}
3075}
3076
3077static void
3078jseg_write(ump, jseg, data)
3079	struct ufsmount *ump;
3080	struct jseg *jseg;
3081	uint8_t *data;
3082{
3083	struct jsegrec *rec;
3084
3085	rec = (struct jsegrec *)data;
3086	rec->jsr_seq = jseg->js_seq;
3087	rec->jsr_oldest = jseg->js_oldseq;
3088	rec->jsr_cnt = jseg->js_cnt;
3089	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
3090	rec->jsr_crc = 0;
3091	rec->jsr_time = ump->um_fs->fs_mtime;
3092}
3093
3094static inline void
3095inoref_write(inoref, jseg, rec)
3096	struct inoref *inoref;
3097	struct jseg *jseg;
3098	struct jrefrec *rec;
3099{
3100
3101	inoref->if_jsegdep->jd_seg = jseg;
3102	rec->jr_ino = inoref->if_ino;
3103	rec->jr_parent = inoref->if_parent;
3104	rec->jr_nlink = inoref->if_nlink;
3105	rec->jr_mode = inoref->if_mode;
3106	rec->jr_diroff = inoref->if_diroff;
3107}
3108
3109static void
3110jaddref_write(jaddref, jseg, data)
3111	struct jaddref *jaddref;
3112	struct jseg *jseg;
3113	uint8_t *data;
3114{
3115	struct jrefrec *rec;
3116
3117	rec = (struct jrefrec *)data;
3118	rec->jr_op = JOP_ADDREF;
3119	inoref_write(&jaddref->ja_ref, jseg, rec);
3120}
3121
3122static void
3123jremref_write(jremref, jseg, data)
3124	struct jremref *jremref;
3125	struct jseg *jseg;
3126	uint8_t *data;
3127{
3128	struct jrefrec *rec;
3129
3130	rec = (struct jrefrec *)data;
3131	rec->jr_op = JOP_REMREF;
3132	inoref_write(&jremref->jr_ref, jseg, rec);
3133}
3134
3135static void
3136jmvref_write(jmvref, jseg, data)
3137	struct jmvref *jmvref;
3138	struct jseg *jseg;
3139	uint8_t *data;
3140{
3141	struct jmvrec *rec;
3142
3143	rec = (struct jmvrec *)data;
3144	rec->jm_op = JOP_MVREF;
3145	rec->jm_ino = jmvref->jm_ino;
3146	rec->jm_parent = jmvref->jm_parent;
3147	rec->jm_oldoff = jmvref->jm_oldoff;
3148	rec->jm_newoff = jmvref->jm_newoff;
3149}
3150
3151static void
3152jnewblk_write(jnewblk, jseg, data)
3153	struct jnewblk *jnewblk;
3154	struct jseg *jseg;
3155	uint8_t *data;
3156{
3157	struct jblkrec *rec;
3158
3159	jnewblk->jn_jsegdep->jd_seg = jseg;
3160	rec = (struct jblkrec *)data;
3161	rec->jb_op = JOP_NEWBLK;
3162	rec->jb_ino = jnewblk->jn_ino;
3163	rec->jb_blkno = jnewblk->jn_blkno;
3164	rec->jb_lbn = jnewblk->jn_lbn;
3165	rec->jb_frags = jnewblk->jn_frags;
3166	rec->jb_oldfrags = jnewblk->jn_oldfrags;
3167}
3168
3169static void
3170jfreeblk_write(jfreeblk, jseg, data)
3171	struct jfreeblk *jfreeblk;
3172	struct jseg *jseg;
3173	uint8_t *data;
3174{
3175	struct jblkrec *rec;
3176
3177	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3178	rec = (struct jblkrec *)data;
3179	rec->jb_op = JOP_FREEBLK;
3180	rec->jb_ino = jfreeblk->jf_ino;
3181	rec->jb_blkno = jfreeblk->jf_blkno;
3182	rec->jb_lbn = jfreeblk->jf_lbn;
3183	rec->jb_frags = jfreeblk->jf_frags;
3184	rec->jb_oldfrags = 0;
3185}
3186
3187static void
3188jfreefrag_write(jfreefrag, jseg, data)
3189	struct jfreefrag *jfreefrag;
3190	struct jseg *jseg;
3191	uint8_t *data;
3192{
3193	struct jblkrec *rec;
3194
3195	jfreefrag->fr_jsegdep->jd_seg = jseg;
3196	rec = (struct jblkrec *)data;
3197	rec->jb_op = JOP_FREEBLK;
3198	rec->jb_ino = jfreefrag->fr_ino;
3199	rec->jb_blkno = jfreefrag->fr_blkno;
3200	rec->jb_lbn = jfreefrag->fr_lbn;
3201	rec->jb_frags = jfreefrag->fr_frags;
3202	rec->jb_oldfrags = 0;
3203}
3204
3205static void
3206jtrunc_write(jtrunc, jseg, data)
3207	struct jtrunc *jtrunc;
3208	struct jseg *jseg;
3209	uint8_t *data;
3210{
3211	struct jtrncrec *rec;
3212
3213	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3214	rec = (struct jtrncrec *)data;
3215	rec->jt_op = JOP_TRUNC;
3216	rec->jt_ino = jtrunc->jt_ino;
3217	rec->jt_size = jtrunc->jt_size;
3218	rec->jt_extsize = jtrunc->jt_extsize;
3219}
3220
3221static void
3222jfsync_write(jfsync, jseg, data)
3223	struct jfsync *jfsync;
3224	struct jseg *jseg;
3225	uint8_t *data;
3226{
3227	struct jtrncrec *rec;
3228
3229	rec = (struct jtrncrec *)data;
3230	rec->jt_op = JOP_SYNC;
3231	rec->jt_ino = jfsync->jfs_ino;
3232	rec->jt_size = jfsync->jfs_size;
3233	rec->jt_extsize = jfsync->jfs_extsize;
3234}
3235
3236static void
3237softdep_flushjournal(mp)
3238	struct mount *mp;
3239{
3240	struct jblocks *jblocks;
3241	struct ufsmount *ump;
3242
3243	if (MOUNTEDSUJ(mp) == 0)
3244		return;
3245	ump = VFSTOUFS(mp);
3246	jblocks = ump->softdep_jblocks;
3247	ACQUIRE_LOCK(ump);
3248	while (ump->softdep_on_journal) {
3249		jblocks->jb_needseg = 1;
3250		softdep_process_journal(mp, NULL, MNT_WAIT);
3251	}
3252	FREE_LOCK(ump);
3253}
3254
3255static void softdep_synchronize_completed(struct bio *);
3256static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3257
3258static void
3259softdep_synchronize_completed(bp)
3260        struct bio *bp;
3261{
3262	struct jseg *oldest;
3263	struct jseg *jseg;
3264	struct ufsmount *ump;
3265
3266	/*
3267	 * caller1 marks the last segment written before we issued the
3268	 * synchronize cache.
3269	 */
3270	jseg = bp->bio_caller1;
3271	if (jseg == NULL) {
3272		g_destroy_bio(bp);
3273		return;
3274	}
3275	ump = VFSTOUFS(jseg->js_list.wk_mp);
3276	ACQUIRE_LOCK(ump);
3277	oldest = NULL;
3278	/*
3279	 * Mark all the journal entries waiting on the synchronize cache
3280	 * as completed so they may continue on.
3281	 */
3282	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3283		jseg->js_state |= COMPLETE;
3284		oldest = jseg;
3285		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3286	}
3287	/*
3288	 * Restart deferred journal entry processing from the oldest
3289	 * completed jseg.
3290	 */
3291	if (oldest)
3292		complete_jsegs(oldest);
3293
3294	FREE_LOCK(ump);
3295	g_destroy_bio(bp);
3296}
3297
3298/*
3299 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3300 * barriers.  The journal must be written prior to any blocks that depend
3301 * on it and the journal can not be released until the blocks have be
3302 * written.  This code handles both barriers simultaneously.
3303 */
3304static void
3305softdep_synchronize(bp, ump, caller1)
3306	struct bio *bp;
3307	struct ufsmount *ump;
3308	void *caller1;
3309{
3310
3311	bp->bio_cmd = BIO_FLUSH;
3312	bp->bio_flags |= BIO_ORDERED;
3313	bp->bio_data = NULL;
3314	bp->bio_offset = ump->um_cp->provider->mediasize;
3315	bp->bio_length = 0;
3316	bp->bio_done = softdep_synchronize_completed;
3317	bp->bio_caller1 = caller1;
3318	g_io_request(bp,
3319	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
3320}
3321
3322/*
3323 * Flush some journal records to disk.
3324 */
3325static void
3326softdep_process_journal(mp, needwk, flags)
3327	struct mount *mp;
3328	struct worklist *needwk;
3329	int flags;
3330{
3331	struct jblocks *jblocks;
3332	struct ufsmount *ump;
3333	struct worklist *wk;
3334	struct jseg *jseg;
3335	struct buf *bp;
3336	struct bio *bio;
3337	uint8_t *data;
3338	struct fs *fs;
3339	int shouldflush;
3340	int segwritten;
3341	int jrecmin;	/* Minimum records per block. */
3342	int jrecmax;	/* Maximum records per block. */
3343	int size;
3344	int cnt;
3345	int off;
3346	int devbsize;
3347
3348	if (MOUNTEDSUJ(mp) == 0)
3349		return;
3350	shouldflush = softdep_flushcache;
3351	bio = NULL;
3352	jseg = NULL;
3353	ump = VFSTOUFS(mp);
3354	LOCK_OWNED(ump);
3355	fs = ump->um_fs;
3356	jblocks = ump->softdep_jblocks;
3357	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3358	/*
3359	 * We write anywhere between a disk block and fs block.  The upper
3360	 * bound is picked to prevent buffer cache fragmentation and limit
3361	 * processing time per I/O.
3362	 */
3363	jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3364	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
3365	segwritten = 0;
3366	for (;;) {
3367		cnt = ump->softdep_on_journal;
3368		/*
3369		 * Criteria for writing a segment:
3370		 * 1) We have a full block.
3371		 * 2) We're called from jwait() and haven't found the
3372		 *    journal item yet.
3373		 * 3) Always write if needseg is set.
3374		 * 4) If we are called from process_worklist and have
3375		 *    not yet written anything we write a partial block
3376		 *    to enforce a 1 second maximum latency on journal
3377		 *    entries.
3378		 */
3379		if (cnt < (jrecmax - 1) && needwk == NULL &&
3380		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3381			break;
3382		cnt++;
3383		/*
3384		 * Verify some free journal space.  softdep_prealloc() should
3385		 * guarantee that we don't run out so this is indicative of
3386		 * a problem with the flow control.  Try to recover
3387		 * gracefully in any event.
3388		 */
3389		while (jblocks->jb_free == 0) {
3390			if (flags != MNT_WAIT)
3391				break;
3392			printf("softdep: Out of journal space!\n");
3393			softdep_speedup(ump);
3394			msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz);
3395		}
3396		FREE_LOCK(ump);
3397		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3398		workitem_alloc(&jseg->js_list, D_JSEG, mp);
3399		LIST_INIT(&jseg->js_entries);
3400		LIST_INIT(&jseg->js_indirs);
3401		jseg->js_state = ATTACHED;
3402		if (shouldflush == 0)
3403			jseg->js_state |= COMPLETE;
3404		else if (bio == NULL)
3405			bio = g_alloc_bio();
3406		jseg->js_jblocks = jblocks;
3407		bp = geteblk(fs->fs_bsize, 0);
3408		ACQUIRE_LOCK(ump);
3409		/*
3410		 * If there was a race while we were allocating the block
3411		 * and jseg the entry we care about was likely written.
3412		 * We bail out in both the WAIT and NOWAIT case and assume
3413		 * the caller will loop if the entry it cares about is
3414		 * not written.
3415		 */
3416		cnt = ump->softdep_on_journal;
3417		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3418			bp->b_flags |= B_INVAL | B_NOCACHE;
3419			WORKITEM_FREE(jseg, D_JSEG);
3420			FREE_LOCK(ump);
3421			brelse(bp);
3422			ACQUIRE_LOCK(ump);
3423			break;
3424		}
3425		/*
3426		 * Calculate the disk block size required for the available
3427		 * records rounded to the min size.
3428		 */
3429		if (cnt == 0)
3430			size = devbsize;
3431		else if (cnt < jrecmax)
3432			size = howmany(cnt, jrecmin) * devbsize;
3433		else
3434			size = fs->fs_bsize;
3435		/*
3436		 * Allocate a disk block for this journal data and account
3437		 * for truncation of the requested size if enough contiguous
3438		 * space was not available.
3439		 */
3440		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3441		bp->b_lblkno = bp->b_blkno;
3442		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3443		bp->b_bcount = size;
3444		bp->b_flags &= ~B_INVAL;
3445		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3446		/*
3447		 * Initialize our jseg with cnt records.  Assign the next
3448		 * sequence number to it and link it in-order.
3449		 */
3450		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3451		jseg->js_buf = bp;
3452		jseg->js_cnt = cnt;
3453		jseg->js_refs = cnt + 1;	/* Self ref. */
3454		jseg->js_size = size;
3455		jseg->js_seq = jblocks->jb_nextseq++;
3456		if (jblocks->jb_oldestseg == NULL)
3457			jblocks->jb_oldestseg = jseg;
3458		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3459		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3460		if (jblocks->jb_writeseg == NULL)
3461			jblocks->jb_writeseg = jseg;
3462		/*
3463		 * Start filling in records from the pending list.
3464		 */
3465		data = bp->b_data;
3466		off = 0;
3467
3468		/*
3469		 * Always put a header on the first block.
3470		 * XXX As with below, there might not be a chance to get
3471		 * into the loop.  Ensure that something valid is written.
3472		 */
3473		jseg_write(ump, jseg, data);
3474		off += JREC_SIZE;
3475		data = bp->b_data + off;
3476
3477		/*
3478		 * XXX Something is wrong here.  There's no work to do,
3479		 * but we need to perform and I/O and allow it to complete
3480		 * anyways.
3481		 */
3482		if (LIST_EMPTY(&ump->softdep_journal_pending))
3483			stat_emptyjblocks++;
3484
3485		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3486		    != NULL) {
3487			if (cnt == 0)
3488				break;
3489			/* Place a segment header on every device block. */
3490			if ((off % devbsize) == 0) {
3491				jseg_write(ump, jseg, data);
3492				off += JREC_SIZE;
3493				data = bp->b_data + off;
3494			}
3495			if (wk == needwk)
3496				needwk = NULL;
3497			remove_from_journal(wk);
3498			wk->wk_state |= INPROGRESS;
3499			WORKLIST_INSERT(&jseg->js_entries, wk);
3500			switch (wk->wk_type) {
3501			case D_JADDREF:
3502				jaddref_write(WK_JADDREF(wk), jseg, data);
3503				break;
3504			case D_JREMREF:
3505				jremref_write(WK_JREMREF(wk), jseg, data);
3506				break;
3507			case D_JMVREF:
3508				jmvref_write(WK_JMVREF(wk), jseg, data);
3509				break;
3510			case D_JNEWBLK:
3511				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3512				break;
3513			case D_JFREEBLK:
3514				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3515				break;
3516			case D_JFREEFRAG:
3517				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3518				break;
3519			case D_JTRUNC:
3520				jtrunc_write(WK_JTRUNC(wk), jseg, data);
3521				break;
3522			case D_JFSYNC:
3523				jfsync_write(WK_JFSYNC(wk), jseg, data);
3524				break;
3525			default:
3526				panic("process_journal: Unknown type %s",
3527				    TYPENAME(wk->wk_type));
3528				/* NOTREACHED */
3529			}
3530			off += JREC_SIZE;
3531			data = bp->b_data + off;
3532			cnt--;
3533		}
3534
3535		/* Clear any remaining space so we don't leak kernel data */
3536		if (size > off)
3537			bzero(data, size - off);
3538
3539		/*
3540		 * Write this one buffer and continue.
3541		 */
3542		segwritten = 1;
3543		jblocks->jb_needseg = 0;
3544		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3545		FREE_LOCK(ump);
3546		pbgetvp(ump->um_devvp, bp);
3547		/*
3548		 * We only do the blocking wait once we find the journal
3549		 * entry we're looking for.
3550		 */
3551		if (needwk == NULL && flags == MNT_WAIT)
3552			bwrite(bp);
3553		else
3554			bawrite(bp);
3555		ACQUIRE_LOCK(ump);
3556	}
3557	/*
3558	 * If we wrote a segment issue a synchronize cache so the journal
3559	 * is reflected on disk before the data is written.  Since reclaiming
3560	 * journal space also requires writing a journal record this
3561	 * process also enforces a barrier before reclamation.
3562	 */
3563	if (segwritten && shouldflush) {
3564		softdep_synchronize(bio, ump,
3565		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
3566	} else if (bio)
3567		g_destroy_bio(bio);
3568	/*
3569	 * If we've suspended the filesystem because we ran out of journal
3570	 * space either try to sync it here to make some progress or
3571	 * unsuspend it if we already have.
3572	 */
3573	if (flags == 0 && jblocks->jb_suspended) {
3574		if (journal_unsuspend(ump))
3575			return;
3576		FREE_LOCK(ump);
3577		VFS_SYNC(mp, MNT_NOWAIT);
3578		ffs_sbupdate(ump, MNT_WAIT, 0);
3579		ACQUIRE_LOCK(ump);
3580	}
3581}
3582
3583/*
3584 * Complete a jseg, allowing all dependencies awaiting journal writes
3585 * to proceed.  Each journal dependency also attaches a jsegdep to dependent
3586 * structures so that the journal segment can be freed to reclaim space.
3587 */
3588static void
3589complete_jseg(jseg)
3590	struct jseg *jseg;
3591{
3592	struct worklist *wk;
3593	struct jmvref *jmvref;
3594	int waiting;
3595#ifdef INVARIANTS
3596	int i = 0;
3597#endif
3598
3599	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3600		WORKLIST_REMOVE(wk);
3601		waiting = wk->wk_state & IOWAITING;
3602		wk->wk_state &= ~(INPROGRESS | IOWAITING);
3603		wk->wk_state |= COMPLETE;
3604		KASSERT(i++ < jseg->js_cnt,
3605		    ("handle_written_jseg: overflow %d >= %d",
3606		    i - 1, jseg->js_cnt));
3607		switch (wk->wk_type) {
3608		case D_JADDREF:
3609			handle_written_jaddref(WK_JADDREF(wk));
3610			break;
3611		case D_JREMREF:
3612			handle_written_jremref(WK_JREMREF(wk));
3613			break;
3614		case D_JMVREF:
3615			rele_jseg(jseg);	/* No jsegdep. */
3616			jmvref = WK_JMVREF(wk);
3617			LIST_REMOVE(jmvref, jm_deps);
3618			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3619				free_pagedep(jmvref->jm_pagedep);
3620			WORKITEM_FREE(jmvref, D_JMVREF);
3621			break;
3622		case D_JNEWBLK:
3623			handle_written_jnewblk(WK_JNEWBLK(wk));
3624			break;
3625		case D_JFREEBLK:
3626			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3627			break;
3628		case D_JTRUNC:
3629			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3630			break;
3631		case D_JFSYNC:
3632			rele_jseg(jseg);	/* No jsegdep. */
3633			WORKITEM_FREE(wk, D_JFSYNC);
3634			break;
3635		case D_JFREEFRAG:
3636			handle_written_jfreefrag(WK_JFREEFRAG(wk));
3637			break;
3638		default:
3639			panic("handle_written_jseg: Unknown type %s",
3640			    TYPENAME(wk->wk_type));
3641			/* NOTREACHED */
3642		}
3643		if (waiting)
3644			wakeup(wk);
3645	}
3646	/* Release the self reference so the structure may be freed. */
3647	rele_jseg(jseg);
3648}
3649
3650/*
3651 * Determine which jsegs are ready for completion processing.  Waits for
3652 * synchronize cache to complete as well as forcing in-order completion
3653 * of journal entries.
3654 */
3655static void
3656complete_jsegs(jseg)
3657	struct jseg *jseg;
3658{
3659	struct jblocks *jblocks;
3660	struct jseg *jsegn;
3661
3662	jblocks = jseg->js_jblocks;
3663	/*
3664	 * Don't allow out of order completions.  If this isn't the first
3665	 * block wait for it to write before we're done.
3666	 */
3667	if (jseg != jblocks->jb_writeseg)
3668		return;
3669	/* Iterate through available jsegs processing their entries. */
3670	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3671		jblocks->jb_oldestwrseq = jseg->js_oldseq;
3672		jsegn = TAILQ_NEXT(jseg, js_next);
3673		complete_jseg(jseg);
3674		jseg = jsegn;
3675	}
3676	jblocks->jb_writeseg = jseg;
3677	/*
3678	 * Attempt to free jsegs now that oldestwrseq may have advanced.
3679	 */
3680	free_jsegs(jblocks);
3681}
3682
3683/*
3684 * Mark a jseg as DEPCOMPLETE and throw away the buffer.  Attempt to handle
3685 * the final completions.
3686 */
3687static void
3688handle_written_jseg(jseg, bp)
3689	struct jseg *jseg;
3690	struct buf *bp;
3691{
3692
3693	if (jseg->js_refs == 0)
3694		panic("handle_written_jseg: No self-reference on %p", jseg);
3695	jseg->js_state |= DEPCOMPLETE;
3696	/*
3697	 * We'll never need this buffer again, set flags so it will be
3698	 * discarded.
3699	 */
3700	bp->b_flags |= B_INVAL | B_NOCACHE;
3701	pbrelvp(bp);
3702	complete_jsegs(jseg);
3703}
3704
3705static inline struct jsegdep *
3706inoref_jseg(inoref)
3707	struct inoref *inoref;
3708{
3709	struct jsegdep *jsegdep;
3710
3711	jsegdep = inoref->if_jsegdep;
3712	inoref->if_jsegdep = NULL;
3713
3714	return (jsegdep);
3715}
3716
3717/*
3718 * Called once a jremref has made it to stable store.  The jremref is marked
3719 * complete and we attempt to free it.  Any pagedeps writes sleeping waiting
3720 * for the jremref to complete will be awoken by free_jremref.
3721 */
3722static void
3723handle_written_jremref(jremref)
3724	struct jremref *jremref;
3725{
3726	struct inodedep *inodedep;
3727	struct jsegdep *jsegdep;
3728	struct dirrem *dirrem;
3729
3730	/* Grab the jsegdep. */
3731	jsegdep = inoref_jseg(&jremref->jr_ref);
3732	/*
3733	 * Remove us from the inoref list.
3734	 */
3735	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
3736	    0, &inodedep) == 0)
3737		panic("handle_written_jremref: Lost inodedep");
3738	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
3739	/*
3740	 * Complete the dirrem.
3741	 */
3742	dirrem = jremref->jr_dirrem;
3743	jremref->jr_dirrem = NULL;
3744	LIST_REMOVE(jremref, jr_deps);
3745	jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
3746	jwork_insert(&dirrem->dm_jwork, jsegdep);
3747	if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
3748	    (dirrem->dm_state & COMPLETE) != 0)
3749		add_to_worklist(&dirrem->dm_list, 0);
3750	free_jremref(jremref);
3751}
3752
3753/*
3754 * Called once a jaddref has made it to stable store.  The dependency is
3755 * marked complete and any dependent structures are added to the inode
3756 * bufwait list to be completed as soon as it is written.  If a bitmap write
3757 * depends on this entry we move the inode into the inodedephd of the
3758 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
3759 */
3760static void
3761handle_written_jaddref(jaddref)
3762	struct jaddref *jaddref;
3763{
3764	struct jsegdep *jsegdep;
3765	struct inodedep *inodedep;
3766	struct diradd *diradd;
3767	struct mkdir *mkdir;
3768
3769	/* Grab the jsegdep. */
3770	jsegdep = inoref_jseg(&jaddref->ja_ref);
3771	mkdir = NULL;
3772	diradd = NULL;
3773	if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
3774	    0, &inodedep) == 0)
3775		panic("handle_written_jaddref: Lost inodedep.");
3776	if (jaddref->ja_diradd == NULL)
3777		panic("handle_written_jaddref: No dependency");
3778	if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
3779		diradd = jaddref->ja_diradd;
3780		WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
3781	} else if (jaddref->ja_state & MKDIR_PARENT) {
3782		mkdir = jaddref->ja_mkdir;
3783		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
3784	} else if (jaddref->ja_state & MKDIR_BODY)
3785		mkdir = jaddref->ja_mkdir;
3786	else
3787		panic("handle_written_jaddref: Unknown dependency %p",
3788		    jaddref->ja_diradd);
3789	jaddref->ja_diradd = NULL;	/* also clears ja_mkdir */
3790	/*
3791	 * Remove us from the inode list.
3792	 */
3793	TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
3794	/*
3795	 * The mkdir may be waiting on the jaddref to clear before freeing.
3796	 */
3797	if (mkdir) {
3798		KASSERT(mkdir->md_list.wk_type == D_MKDIR,
3799		    ("handle_written_jaddref: Incorrect type for mkdir %s",
3800		    TYPENAME(mkdir->md_list.wk_type)));
3801		mkdir->md_jaddref = NULL;
3802		diradd = mkdir->md_diradd;
3803		mkdir->md_state |= DEPCOMPLETE;
3804		complete_mkdir(mkdir);
3805	}
3806	jwork_insert(&diradd->da_jwork, jsegdep);
3807	if (jaddref->ja_state & NEWBLOCK) {
3808		inodedep->id_state |= ONDEPLIST;
3809		LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
3810		    inodedep, id_deps);
3811	}
3812	free_jaddref(jaddref);
3813}
3814
3815/*
3816 * Called once a jnewblk journal is written.  The allocdirect or allocindir
3817 * is placed in the bmsafemap to await notification of a written bitmap.  If
3818 * the operation was canceled we add the segdep to the appropriate
3819 * dependency to free the journal space once the canceling operation
3820 * completes.
3821 */
3822static void
3823handle_written_jnewblk(jnewblk)
3824	struct jnewblk *jnewblk;
3825{
3826	struct bmsafemap *bmsafemap;
3827	struct freefrag *freefrag;
3828	struct freework *freework;
3829	struct jsegdep *jsegdep;
3830	struct newblk *newblk;
3831
3832	/* Grab the jsegdep. */
3833	jsegdep = jnewblk->jn_jsegdep;
3834	jnewblk->jn_jsegdep = NULL;
3835	if (jnewblk->jn_dep == NULL)
3836		panic("handle_written_jnewblk: No dependency for the segdep.");
3837	switch (jnewblk->jn_dep->wk_type) {
3838	case D_NEWBLK:
3839	case D_ALLOCDIRECT:
3840	case D_ALLOCINDIR:
3841		/*
3842		 * Add the written block to the bmsafemap so it can
3843		 * be notified when the bitmap is on disk.
3844		 */
3845		newblk = WK_NEWBLK(jnewblk->jn_dep);
3846		newblk->nb_jnewblk = NULL;
3847		if ((newblk->nb_state & GOINGAWAY) == 0) {
3848			bmsafemap = newblk->nb_bmsafemap;
3849			newblk->nb_state |= ONDEPLIST;
3850			LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
3851			    nb_deps);
3852		}
3853		jwork_insert(&newblk->nb_jwork, jsegdep);
3854		break;
3855	case D_FREEFRAG:
3856		/*
3857		 * A newblock being removed by a freefrag when replaced by
3858		 * frag extension.
3859		 */
3860		freefrag = WK_FREEFRAG(jnewblk->jn_dep);
3861		freefrag->ff_jdep = NULL;
3862		jwork_insert(&freefrag->ff_jwork, jsegdep);
3863		break;
3864	case D_FREEWORK:
3865		/*
3866		 * A direct block was removed by truncate.
3867		 */
3868		freework = WK_FREEWORK(jnewblk->jn_dep);
3869		freework->fw_jnewblk = NULL;
3870		jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
3871		break;
3872	default:
3873		panic("handle_written_jnewblk: Unknown type %d.",
3874		    jnewblk->jn_dep->wk_type);
3875	}
3876	jnewblk->jn_dep = NULL;
3877	free_jnewblk(jnewblk);
3878}
3879
3880/*
3881 * Cancel a jfreefrag that won't be needed, probably due to colliding with
3882 * an in-flight allocation that has not yet been committed.  Divorce us
3883 * from the freefrag and mark it DEPCOMPLETE so that it may be added
3884 * to the worklist.
3885 */
3886static void
3887cancel_jfreefrag(jfreefrag)
3888	struct jfreefrag *jfreefrag;
3889{
3890	struct freefrag *freefrag;
3891
3892	if (jfreefrag->fr_jsegdep) {
3893		free_jsegdep(jfreefrag->fr_jsegdep);
3894		jfreefrag->fr_jsegdep = NULL;
3895	}
3896	freefrag = jfreefrag->fr_freefrag;
3897	jfreefrag->fr_freefrag = NULL;
3898	free_jfreefrag(jfreefrag);
3899	freefrag->ff_state |= DEPCOMPLETE;
3900	CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
3901}
3902
3903/*
3904 * Free a jfreefrag when the parent freefrag is rendered obsolete.
3905 */
3906static void
3907free_jfreefrag(jfreefrag)
3908	struct jfreefrag *jfreefrag;
3909{
3910
3911	if (jfreefrag->fr_state & INPROGRESS)
3912		WORKLIST_REMOVE(&jfreefrag->fr_list);
3913	else if (jfreefrag->fr_state & ONWORKLIST)
3914		remove_from_journal(&jfreefrag->fr_list);
3915	if (jfreefrag->fr_freefrag != NULL)
3916		panic("free_jfreefrag:  Still attached to a freefrag.");
3917	WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
3918}
3919
3920/*
3921 * Called when the journal write for a jfreefrag completes.  The parent
3922 * freefrag is added to the worklist if this completes its dependencies.
3923 */
3924static void
3925handle_written_jfreefrag(jfreefrag)
3926	struct jfreefrag *jfreefrag;
3927{
3928	struct jsegdep *jsegdep;
3929	struct freefrag *freefrag;
3930
3931	/* Grab the jsegdep. */
3932	jsegdep = jfreefrag->fr_jsegdep;
3933	jfreefrag->fr_jsegdep = NULL;
3934	freefrag = jfreefrag->fr_freefrag;
3935	if (freefrag == NULL)
3936		panic("handle_written_jfreefrag: No freefrag.");
3937	freefrag->ff_state |= DEPCOMPLETE;
3938	freefrag->ff_jdep = NULL;
3939	jwork_insert(&freefrag->ff_jwork, jsegdep);
3940	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
3941		add_to_worklist(&freefrag->ff_list, 0);
3942	jfreefrag->fr_freefrag = NULL;
3943	free_jfreefrag(jfreefrag);
3944}
3945
3946/*
3947 * Called when the journal write for a jfreeblk completes.  The jfreeblk
3948 * is removed from the freeblks list of pending journal writes and the
3949 * jsegdep is moved to the freeblks jwork to be completed when all blocks
3950 * have been reclaimed.
3951 */
3952static void
3953handle_written_jblkdep(jblkdep)
3954	struct jblkdep *jblkdep;
3955{
3956	struct freeblks *freeblks;
3957	struct jsegdep *jsegdep;
3958
3959	/* Grab the jsegdep. */
3960	jsegdep = jblkdep->jb_jsegdep;
3961	jblkdep->jb_jsegdep = NULL;
3962	freeblks = jblkdep->jb_freeblks;
3963	LIST_REMOVE(jblkdep, jb_deps);
3964	jwork_insert(&freeblks->fb_jwork, jsegdep);
3965	/*
3966	 * If the freeblks is all journaled, we can add it to the worklist.
3967	 */
3968	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
3969	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
3970		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
3971
3972	free_jblkdep(jblkdep);
3973}
3974
3975static struct jsegdep *
3976newjsegdep(struct worklist *wk)
3977{
3978	struct jsegdep *jsegdep;
3979
3980	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
3981	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
3982	jsegdep->jd_seg = NULL;
3983
3984	return (jsegdep);
3985}
3986
3987static struct jmvref *
3988newjmvref(dp, ino, oldoff, newoff)
3989	struct inode *dp;
3990	ino_t ino;
3991	off_t oldoff;
3992	off_t newoff;
3993{
3994	struct jmvref *jmvref;
3995
3996	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
3997	workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump));
3998	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
3999	jmvref->jm_parent = dp->i_number;
4000	jmvref->jm_ino = ino;
4001	jmvref->jm_oldoff = oldoff;
4002	jmvref->jm_newoff = newoff;
4003
4004	return (jmvref);
4005}
4006
4007/*
4008 * Allocate a new jremref that tracks the removal of ip from dp with the
4009 * directory entry offset of diroff.  Mark the entry as ATTACHED and
4010 * DEPCOMPLETE as we have all the information required for the journal write
4011 * and the directory has already been removed from the buffer.  The caller
4012 * is responsible for linking the jremref into the pagedep and adding it
4013 * to the journal to write.  The MKDIR_PARENT flag is set if we're doing
4014 * a DOTDOT addition so handle_workitem_remove() can properly assign
4015 * the jsegdep when we're done.
4016 */
4017static struct jremref *
4018newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
4019    off_t diroff, nlink_t nlink)
4020{
4021	struct jremref *jremref;
4022
4023	jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
4024	workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
4025	jremref->jr_state = ATTACHED;
4026	newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
4027	   nlink, ip->i_mode);
4028	jremref->jr_dirrem = dirrem;
4029
4030	return (jremref);
4031}
4032
4033static inline void
4034newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
4035    nlink_t nlink, uint16_t mode)
4036{
4037
4038	inoref->if_jsegdep = newjsegdep(&inoref->if_list);
4039	inoref->if_diroff = diroff;
4040	inoref->if_ino = ino;
4041	inoref->if_parent = parent;
4042	inoref->if_nlink = nlink;
4043	inoref->if_mode = mode;
4044}
4045
4046/*
4047 * Allocate a new jaddref to track the addition of ino to dp at diroff.  The
4048 * directory offset may not be known until later.  The caller is responsible
4049 * adding the entry to the journal when this information is available.  nlink
4050 * should be the link count prior to the addition and mode is only required
4051 * to have the correct FMT.
4052 */
4053static struct jaddref *
4054newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
4055    uint16_t mode)
4056{
4057	struct jaddref *jaddref;
4058
4059	jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
4060	workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
4061	jaddref->ja_state = ATTACHED;
4062	jaddref->ja_mkdir = NULL;
4063	newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
4064
4065	return (jaddref);
4066}
4067
4068/*
4069 * Create a new free dependency for a freework.  The caller is responsible
4070 * for adjusting the reference count when it has the lock held.  The freedep
4071 * will track an outstanding bitmap write that will ultimately clear the
4072 * freework to continue.
4073 */
4074static struct freedep *
4075newfreedep(struct freework *freework)
4076{
4077	struct freedep *freedep;
4078
4079	freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
4080	workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
4081	freedep->fd_freework = freework;
4082
4083	return (freedep);
4084}
4085
4086/*
4087 * Free a freedep structure once the buffer it is linked to is written.  If
4088 * this is the last reference to the freework schedule it for completion.
4089 */
4090static void
4091free_freedep(freedep)
4092	struct freedep *freedep;
4093{
4094	struct freework *freework;
4095
4096	freework = freedep->fd_freework;
4097	freework->fw_freeblks->fb_cgwait--;
4098	if (--freework->fw_ref == 0)
4099		freework_enqueue(freework);
4100	WORKITEM_FREE(freedep, D_FREEDEP);
4101}
4102
4103/*
4104 * Allocate a new freework structure that may be a level in an indirect
4105 * when parent is not NULL or a top level block when it is.  The top level
4106 * freework structures are allocated without the per-filesystem lock held
4107 * and before the freeblks is visible outside of softdep_setup_freeblocks().
4108 */
4109static struct freework *
4110newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal)
4111	struct ufsmount *ump;
4112	struct freeblks *freeblks;
4113	struct freework *parent;
4114	ufs_lbn_t lbn;
4115	ufs2_daddr_t nb;
4116	int frags;
4117	int off;
4118	int journal;
4119{
4120	struct freework *freework;
4121
4122	freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
4123	workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
4124	freework->fw_state = ATTACHED;
4125	freework->fw_jnewblk = NULL;
4126	freework->fw_freeblks = freeblks;
4127	freework->fw_parent = parent;
4128	freework->fw_lbn = lbn;
4129	freework->fw_blkno = nb;
4130	freework->fw_frags = frags;
4131	freework->fw_indir = NULL;
4132	freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR)
4133		? 0 : NINDIR(ump->um_fs) + 1;
4134	freework->fw_start = freework->fw_off = off;
4135	if (journal)
4136		newjfreeblk(freeblks, lbn, nb, frags);
4137	if (parent == NULL) {
4138		ACQUIRE_LOCK(ump);
4139		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
4140		freeblks->fb_ref++;
4141		FREE_LOCK(ump);
4142	}
4143
4144	return (freework);
4145}
4146
4147/*
4148 * Eliminate a jfreeblk for a block that does not need journaling.
4149 */
4150static void
4151cancel_jfreeblk(freeblks, blkno)
4152	struct freeblks *freeblks;
4153	ufs2_daddr_t blkno;
4154{
4155	struct jfreeblk *jfreeblk;
4156	struct jblkdep *jblkdep;
4157
4158	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4159		if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4160			continue;
4161		jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4162		if (jfreeblk->jf_blkno == blkno)
4163			break;
4164	}
4165	if (jblkdep == NULL)
4166		return;
4167	CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4168	free_jsegdep(jblkdep->jb_jsegdep);
4169	LIST_REMOVE(jblkdep, jb_deps);
4170	WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4171}
4172
4173/*
4174 * Allocate a new jfreeblk to journal top level block pointer when truncating
4175 * a file.  The caller must add this to the worklist when the per-filesystem
4176 * lock is held.
4177 */
4178static struct jfreeblk *
4179newjfreeblk(freeblks, lbn, blkno, frags)
4180	struct freeblks *freeblks;
4181	ufs_lbn_t lbn;
4182	ufs2_daddr_t blkno;
4183	int frags;
4184{
4185	struct jfreeblk *jfreeblk;
4186
4187	jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4188	workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4189	    freeblks->fb_list.wk_mp);
4190	jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4191	jfreeblk->jf_dep.jb_freeblks = freeblks;
4192	jfreeblk->jf_ino = freeblks->fb_inum;
4193	jfreeblk->jf_lbn = lbn;
4194	jfreeblk->jf_blkno = blkno;
4195	jfreeblk->jf_frags = frags;
4196	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4197
4198	return (jfreeblk);
4199}
4200
4201/*
4202 * The journal is only prepared to handle full-size block numbers, so we
4203 * have to adjust the record to reflect the change to a full-size block.
4204 * For example, suppose we have a block made up of fragments 8-15 and
4205 * want to free its last two fragments. We are given a request that says:
4206 *     FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0
4207 * where frags are the number of fragments to free and oldfrags are the
4208 * number of fragments to keep. To block align it, we have to change it to
4209 * have a valid full-size blkno, so it becomes:
4210 *     FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6
4211 */
4212static void
4213adjust_newfreework(freeblks, frag_offset)
4214	struct freeblks *freeblks;
4215	int frag_offset;
4216{
4217	struct jfreeblk *jfreeblk;
4218
4219	KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL &&
4220	    LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK),
4221	    ("adjust_newfreework: Missing freeblks dependency"));
4222
4223	jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd));
4224	jfreeblk->jf_blkno -= frag_offset;
4225	jfreeblk->jf_frags += frag_offset;
4226}
4227
4228/*
4229 * Allocate a new jtrunc to track a partial truncation.
4230 */
4231static struct jtrunc *
4232newjtrunc(freeblks, size, extsize)
4233	struct freeblks *freeblks;
4234	off_t size;
4235	int extsize;
4236{
4237	struct jtrunc *jtrunc;
4238
4239	jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4240	workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4241	    freeblks->fb_list.wk_mp);
4242	jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4243	jtrunc->jt_dep.jb_freeblks = freeblks;
4244	jtrunc->jt_ino = freeblks->fb_inum;
4245	jtrunc->jt_size = size;
4246	jtrunc->jt_extsize = extsize;
4247	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4248
4249	return (jtrunc);
4250}
4251
4252/*
4253 * If we're canceling a new bitmap we have to search for another ref
4254 * to move into the bmsafemap dep.  This might be better expressed
4255 * with another structure.
4256 */
4257static void
4258move_newblock_dep(jaddref, inodedep)
4259	struct jaddref *jaddref;
4260	struct inodedep *inodedep;
4261{
4262	struct inoref *inoref;
4263	struct jaddref *jaddrefn;
4264
4265	jaddrefn = NULL;
4266	for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4267	    inoref = TAILQ_NEXT(inoref, if_deps)) {
4268		if ((jaddref->ja_state & NEWBLOCK) &&
4269		    inoref->if_list.wk_type == D_JADDREF) {
4270			jaddrefn = (struct jaddref *)inoref;
4271			break;
4272		}
4273	}
4274	if (jaddrefn == NULL)
4275		return;
4276	jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4277	jaddrefn->ja_state |= jaddref->ja_state &
4278	    (ATTACHED | UNDONE | NEWBLOCK);
4279	jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4280	jaddref->ja_state |= ATTACHED;
4281	LIST_REMOVE(jaddref, ja_bmdeps);
4282	LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4283	    ja_bmdeps);
4284}
4285
4286/*
4287 * Cancel a jaddref either before it has been written or while it is being
4288 * written.  This happens when a link is removed before the add reaches
4289 * the disk.  The jaddref dependency is kept linked into the bmsafemap
4290 * and inode to prevent the link count or bitmap from reaching the disk
4291 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4292 * required.
4293 *
4294 * Returns 1 if the canceled addref requires journaling of the remove and
4295 * 0 otherwise.
4296 */
4297static int
4298cancel_jaddref(jaddref, inodedep, wkhd)
4299	struct jaddref *jaddref;
4300	struct inodedep *inodedep;
4301	struct workhead *wkhd;
4302{
4303	struct inoref *inoref;
4304	struct jsegdep *jsegdep;
4305	int needsj;
4306
4307	KASSERT((jaddref->ja_state & COMPLETE) == 0,
4308	    ("cancel_jaddref: Canceling complete jaddref"));
4309	if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4310		needsj = 1;
4311	else
4312		needsj = 0;
4313	if (inodedep == NULL)
4314		if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4315		    0, &inodedep) == 0)
4316			panic("cancel_jaddref: Lost inodedep");
4317	/*
4318	 * We must adjust the nlink of any reference operation that follows
4319	 * us so that it is consistent with the in-memory reference.  This
4320	 * ensures that inode nlink rollbacks always have the correct link.
4321	 */
4322	if (needsj == 0) {
4323		for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4324		    inoref = TAILQ_NEXT(inoref, if_deps)) {
4325			if (inoref->if_state & GOINGAWAY)
4326				break;
4327			inoref->if_nlink--;
4328		}
4329	}
4330	jsegdep = inoref_jseg(&jaddref->ja_ref);
4331	if (jaddref->ja_state & NEWBLOCK)
4332		move_newblock_dep(jaddref, inodedep);
4333	wake_worklist(&jaddref->ja_list);
4334	jaddref->ja_mkdir = NULL;
4335	if (jaddref->ja_state & INPROGRESS) {
4336		jaddref->ja_state &= ~INPROGRESS;
4337		WORKLIST_REMOVE(&jaddref->ja_list);
4338		jwork_insert(wkhd, jsegdep);
4339	} else {
4340		free_jsegdep(jsegdep);
4341		if (jaddref->ja_state & DEPCOMPLETE)
4342			remove_from_journal(&jaddref->ja_list);
4343	}
4344	jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4345	/*
4346	 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4347	 * can arrange for them to be freed with the bitmap.  Otherwise we
4348	 * no longer need this addref attached to the inoreflst and it
4349	 * will incorrectly adjust nlink if we leave it.
4350	 */
4351	if ((jaddref->ja_state & NEWBLOCK) == 0) {
4352		TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4353		    if_deps);
4354		jaddref->ja_state |= COMPLETE;
4355		free_jaddref(jaddref);
4356		return (needsj);
4357	}
4358	/*
4359	 * Leave the head of the list for jsegdeps for fast merging.
4360	 */
4361	if (LIST_FIRST(wkhd) != NULL) {
4362		jaddref->ja_state |= ONWORKLIST;
4363		LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4364	} else
4365		WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4366
4367	return (needsj);
4368}
4369
4370/*
4371 * Attempt to free a jaddref structure when some work completes.  This
4372 * should only succeed once the entry is written and all dependencies have
4373 * been notified.
4374 */
4375static void
4376free_jaddref(jaddref)
4377	struct jaddref *jaddref;
4378{
4379
4380	if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4381		return;
4382	if (jaddref->ja_ref.if_jsegdep)
4383		panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4384		    jaddref, jaddref->ja_state);
4385	if (jaddref->ja_state & NEWBLOCK)
4386		LIST_REMOVE(jaddref, ja_bmdeps);
4387	if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4388		panic("free_jaddref: Bad state %p(0x%X)",
4389		    jaddref, jaddref->ja_state);
4390	if (jaddref->ja_mkdir != NULL)
4391		panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4392	WORKITEM_FREE(jaddref, D_JADDREF);
4393}
4394
4395/*
4396 * Free a jremref structure once it has been written or discarded.
4397 */
4398static void
4399free_jremref(jremref)
4400	struct jremref *jremref;
4401{
4402
4403	if (jremref->jr_ref.if_jsegdep)
4404		free_jsegdep(jremref->jr_ref.if_jsegdep);
4405	if (jremref->jr_state & INPROGRESS)
4406		panic("free_jremref: IO still pending");
4407	WORKITEM_FREE(jremref, D_JREMREF);
4408}
4409
4410/*
4411 * Free a jnewblk structure.
4412 */
4413static void
4414free_jnewblk(jnewblk)
4415	struct jnewblk *jnewblk;
4416{
4417
4418	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4419		return;
4420	LIST_REMOVE(jnewblk, jn_deps);
4421	if (jnewblk->jn_dep != NULL)
4422		panic("free_jnewblk: Dependency still attached.");
4423	WORKITEM_FREE(jnewblk, D_JNEWBLK);
4424}
4425
4426/*
4427 * Cancel a jnewblk which has been been made redundant by frag extension.
4428 */
4429static void
4430cancel_jnewblk(jnewblk, wkhd)
4431	struct jnewblk *jnewblk;
4432	struct workhead *wkhd;
4433{
4434	struct jsegdep *jsegdep;
4435
4436	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4437	jsegdep = jnewblk->jn_jsegdep;
4438	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4439		panic("cancel_jnewblk: Invalid state");
4440	jnewblk->jn_jsegdep  = NULL;
4441	jnewblk->jn_dep = NULL;
4442	jnewblk->jn_state |= GOINGAWAY;
4443	if (jnewblk->jn_state & INPROGRESS) {
4444		jnewblk->jn_state &= ~INPROGRESS;
4445		WORKLIST_REMOVE(&jnewblk->jn_list);
4446		jwork_insert(wkhd, jsegdep);
4447	} else {
4448		free_jsegdep(jsegdep);
4449		remove_from_journal(&jnewblk->jn_list);
4450	}
4451	wake_worklist(&jnewblk->jn_list);
4452	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4453}
4454
4455static void
4456free_jblkdep(jblkdep)
4457	struct jblkdep *jblkdep;
4458{
4459
4460	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4461		WORKITEM_FREE(jblkdep, D_JFREEBLK);
4462	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4463		WORKITEM_FREE(jblkdep, D_JTRUNC);
4464	else
4465		panic("free_jblkdep: Unexpected type %s",
4466		    TYPENAME(jblkdep->jb_list.wk_type));
4467}
4468
4469/*
4470 * Free a single jseg once it is no longer referenced in memory or on
4471 * disk.  Reclaim journal blocks and dependencies waiting for the segment
4472 * to disappear.
4473 */
4474static void
4475free_jseg(jseg, jblocks)
4476	struct jseg *jseg;
4477	struct jblocks *jblocks;
4478{
4479	struct freework *freework;
4480
4481	/*
4482	 * Free freework structures that were lingering to indicate freed
4483	 * indirect blocks that forced journal write ordering on reallocate.
4484	 */
4485	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4486		indirblk_remove(freework);
4487	if (jblocks->jb_oldestseg == jseg)
4488		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4489	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4490	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4491	KASSERT(LIST_EMPTY(&jseg->js_entries),
4492	    ("free_jseg: Freed jseg has valid entries."));
4493	WORKITEM_FREE(jseg, D_JSEG);
4494}
4495
4496/*
4497 * Free all jsegs that meet the criteria for being reclaimed and update
4498 * oldestseg.
4499 */
4500static void
4501free_jsegs(jblocks)
4502	struct jblocks *jblocks;
4503{
4504	struct jseg *jseg;
4505
4506	/*
4507	 * Free only those jsegs which have none allocated before them to
4508	 * preserve the journal space ordering.
4509	 */
4510	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4511		/*
4512		 * Only reclaim space when nothing depends on this journal
4513		 * set and another set has written that it is no longer
4514		 * valid.
4515		 */
4516		if (jseg->js_refs != 0) {
4517			jblocks->jb_oldestseg = jseg;
4518			return;
4519		}
4520		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4521			break;
4522		if (jseg->js_seq > jblocks->jb_oldestwrseq)
4523			break;
4524		/*
4525		 * We can free jsegs that didn't write entries when
4526		 * oldestwrseq == js_seq.
4527		 */
4528		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4529		    jseg->js_cnt != 0)
4530			break;
4531		free_jseg(jseg, jblocks);
4532	}
4533	/*
4534	 * If we exited the loop above we still must discover the
4535	 * oldest valid segment.
4536	 */
4537	if (jseg)
4538		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4539		     jseg = TAILQ_NEXT(jseg, js_next))
4540			if (jseg->js_refs != 0)
4541				break;
4542	jblocks->jb_oldestseg = jseg;
4543	/*
4544	 * The journal has no valid records but some jsegs may still be
4545	 * waiting on oldestwrseq to advance.  We force a small record
4546	 * out to permit these lingering records to be reclaimed.
4547	 */
4548	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4549		jblocks->jb_needseg = 1;
4550}
4551
4552/*
4553 * Release one reference to a jseg and free it if the count reaches 0.  This
4554 * should eventually reclaim journal space as well.
4555 */
4556static void
4557rele_jseg(jseg)
4558	struct jseg *jseg;
4559{
4560
4561	KASSERT(jseg->js_refs > 0,
4562	    ("free_jseg: Invalid refcnt %d", jseg->js_refs));
4563	if (--jseg->js_refs != 0)
4564		return;
4565	free_jsegs(jseg->js_jblocks);
4566}
4567
4568/*
4569 * Release a jsegdep and decrement the jseg count.
4570 */
4571static void
4572free_jsegdep(jsegdep)
4573	struct jsegdep *jsegdep;
4574{
4575
4576	if (jsegdep->jd_seg)
4577		rele_jseg(jsegdep->jd_seg);
4578	WORKITEM_FREE(jsegdep, D_JSEGDEP);
4579}
4580
4581/*
4582 * Wait for a journal item to make it to disk.  Initiate journal processing
4583 * if required.
4584 */
4585static int
4586jwait(wk, waitfor)
4587	struct worklist *wk;
4588	int waitfor;
4589{
4590
4591	LOCK_OWNED(VFSTOUFS(wk->wk_mp));
4592	/*
4593	 * Blocking journal waits cause slow synchronous behavior.  Record
4594	 * stats on the frequency of these blocking operations.
4595	 */
4596	if (waitfor == MNT_WAIT) {
4597		stat_journal_wait++;
4598		switch (wk->wk_type) {
4599		case D_JREMREF:
4600		case D_JMVREF:
4601			stat_jwait_filepage++;
4602			break;
4603		case D_JTRUNC:
4604		case D_JFREEBLK:
4605			stat_jwait_freeblks++;
4606			break;
4607		case D_JNEWBLK:
4608			stat_jwait_newblk++;
4609			break;
4610		case D_JADDREF:
4611			stat_jwait_inode++;
4612			break;
4613		default:
4614			break;
4615		}
4616	}
4617	/*
4618	 * If IO has not started we process the journal.  We can't mark the
4619	 * worklist item as IOWAITING because we drop the lock while
4620	 * processing the journal and the worklist entry may be freed after
4621	 * this point.  The caller may call back in and re-issue the request.
4622	 */
4623	if ((wk->wk_state & INPROGRESS) == 0) {
4624		softdep_process_journal(wk->wk_mp, wk, waitfor);
4625		if (waitfor != MNT_WAIT)
4626			return (EBUSY);
4627		return (0);
4628	}
4629	if (waitfor != MNT_WAIT)
4630		return (EBUSY);
4631	wait_worklist(wk, "jwait");
4632	return (0);
4633}
4634
4635/*
4636 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4637 * appropriate.  This is a convenience function to reduce duplicate code
4638 * for the setup and revert functions below.
4639 */
4640static struct inodedep *
4641inodedep_lookup_ip(ip)
4642	struct inode *ip;
4643{
4644	struct inodedep *inodedep;
4645
4646	KASSERT(ip->i_nlink >= ip->i_effnlink,
4647	    ("inodedep_lookup_ip: bad delta"));
4648	(void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
4649	    &inodedep);
4650	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4651	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4652
4653	return (inodedep);
4654}
4655
4656/*
4657 * Called prior to creating a new inode and linking it to a directory.  The
4658 * jaddref structure must already be allocated by softdep_setup_inomapdep
4659 * and it is discovered here so we can initialize the mode and update
4660 * nlinkdelta.
4661 */
4662void
4663softdep_setup_create(dp, ip)
4664	struct inode *dp;
4665	struct inode *ip;
4666{
4667	struct inodedep *inodedep;
4668	struct jaddref *jaddref;
4669	struct vnode *dvp;
4670
4671	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4672	    ("softdep_setup_create called on non-softdep filesystem"));
4673	KASSERT(ip->i_nlink == 1,
4674	    ("softdep_setup_create: Invalid link count."));
4675	dvp = ITOV(dp);
4676	ACQUIRE_LOCK(dp->i_ump);
4677	inodedep = inodedep_lookup_ip(ip);
4678	if (DOINGSUJ(dvp)) {
4679		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4680		    inoreflst);
4681		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4682		    ("softdep_setup_create: No addref structure present."));
4683	}
4684	softdep_prelink(dvp, NULL);
4685	FREE_LOCK(dp->i_ump);
4686}
4687
4688/*
4689 * Create a jaddref structure to track the addition of a DOTDOT link when
4690 * we are reparenting an inode as part of a rename.  This jaddref will be
4691 * found by softdep_setup_directory_change.  Adjusts nlinkdelta for
4692 * non-journaling softdep.
4693 */
4694void
4695softdep_setup_dotdot_link(dp, ip)
4696	struct inode *dp;
4697	struct inode *ip;
4698{
4699	struct inodedep *inodedep;
4700	struct jaddref *jaddref;
4701	struct vnode *dvp;
4702
4703	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4704	    ("softdep_setup_dotdot_link called on non-softdep filesystem"));
4705	dvp = ITOV(dp);
4706	jaddref = NULL;
4707	/*
4708	 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4709	 * is used as a normal link would be.
4710	 */
4711	if (DOINGSUJ(dvp))
4712		jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4713		    dp->i_effnlink - 1, dp->i_mode);
4714	ACQUIRE_LOCK(dp->i_ump);
4715	inodedep = inodedep_lookup_ip(dp);
4716	if (jaddref)
4717		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4718		    if_deps);
4719	softdep_prelink(dvp, ITOV(ip));
4720	FREE_LOCK(dp->i_ump);
4721}
4722
4723/*
4724 * Create a jaddref structure to track a new link to an inode.  The directory
4725 * offset is not known until softdep_setup_directory_add or
4726 * softdep_setup_directory_change.  Adjusts nlinkdelta for non-journaling
4727 * softdep.
4728 */
4729void
4730softdep_setup_link(dp, ip)
4731	struct inode *dp;
4732	struct inode *ip;
4733{
4734	struct inodedep *inodedep;
4735	struct jaddref *jaddref;
4736	struct vnode *dvp;
4737
4738	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4739	    ("softdep_setup_link called on non-softdep filesystem"));
4740	dvp = ITOV(dp);
4741	jaddref = NULL;
4742	if (DOINGSUJ(dvp))
4743		jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4744		    ip->i_mode);
4745	ACQUIRE_LOCK(dp->i_ump);
4746	inodedep = inodedep_lookup_ip(ip);
4747	if (jaddref)
4748		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4749		    if_deps);
4750	softdep_prelink(dvp, ITOV(ip));
4751	FREE_LOCK(dp->i_ump);
4752}
4753
4754/*
4755 * Called to create the jaddref structures to track . and .. references as
4756 * well as lookup and further initialize the incomplete jaddref created
4757 * by softdep_setup_inomapdep when the inode was allocated.  Adjusts
4758 * nlinkdelta for non-journaling softdep.
4759 */
4760void
4761softdep_setup_mkdir(dp, ip)
4762	struct inode *dp;
4763	struct inode *ip;
4764{
4765	struct inodedep *inodedep;
4766	struct jaddref *dotdotaddref;
4767	struct jaddref *dotaddref;
4768	struct jaddref *jaddref;
4769	struct vnode *dvp;
4770
4771	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4772	    ("softdep_setup_mkdir called on non-softdep filesystem"));
4773	dvp = ITOV(dp);
4774	dotaddref = dotdotaddref = NULL;
4775	if (DOINGSUJ(dvp)) {
4776		dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
4777		    ip->i_mode);
4778		dotaddref->ja_state |= MKDIR_BODY;
4779		dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4780		    dp->i_effnlink - 1, dp->i_mode);
4781		dotdotaddref->ja_state |= MKDIR_PARENT;
4782	}
4783	ACQUIRE_LOCK(dp->i_ump);
4784	inodedep = inodedep_lookup_ip(ip);
4785	if (DOINGSUJ(dvp)) {
4786		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4787		    inoreflst);
4788		KASSERT(jaddref != NULL,
4789		    ("softdep_setup_mkdir: No addref structure present."));
4790		KASSERT(jaddref->ja_parent == dp->i_number,
4791		    ("softdep_setup_mkdir: bad parent %ju",
4792		    (uintmax_t)jaddref->ja_parent));
4793		TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
4794		    if_deps);
4795	}
4796	inodedep = inodedep_lookup_ip(dp);
4797	if (DOINGSUJ(dvp))
4798		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
4799		    &dotdotaddref->ja_ref, if_deps);
4800	softdep_prelink(ITOV(dp), NULL);
4801	FREE_LOCK(dp->i_ump);
4802}
4803
4804/*
4805 * Called to track nlinkdelta of the inode and parent directories prior to
4806 * unlinking a directory.
4807 */
4808void
4809softdep_setup_rmdir(dp, ip)
4810	struct inode *dp;
4811	struct inode *ip;
4812{
4813	struct vnode *dvp;
4814
4815	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4816	    ("softdep_setup_rmdir called on non-softdep filesystem"));
4817	dvp = ITOV(dp);
4818	ACQUIRE_LOCK(dp->i_ump);
4819	(void) inodedep_lookup_ip(ip);
4820	(void) inodedep_lookup_ip(dp);
4821	softdep_prelink(dvp, ITOV(ip));
4822	FREE_LOCK(dp->i_ump);
4823}
4824
4825/*
4826 * Called to track nlinkdelta of the inode and parent directories prior to
4827 * unlink.
4828 */
4829void
4830softdep_setup_unlink(dp, ip)
4831	struct inode *dp;
4832	struct inode *ip;
4833{
4834	struct vnode *dvp;
4835
4836	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4837	    ("softdep_setup_unlink called on non-softdep filesystem"));
4838	dvp = ITOV(dp);
4839	ACQUIRE_LOCK(dp->i_ump);
4840	(void) inodedep_lookup_ip(ip);
4841	(void) inodedep_lookup_ip(dp);
4842	softdep_prelink(dvp, ITOV(ip));
4843	FREE_LOCK(dp->i_ump);
4844}
4845
4846/*
4847 * Called to release the journal structures created by a failed non-directory
4848 * creation.  Adjusts nlinkdelta for non-journaling softdep.
4849 */
4850void
4851softdep_revert_create(dp, ip)
4852	struct inode *dp;
4853	struct inode *ip;
4854{
4855	struct inodedep *inodedep;
4856	struct jaddref *jaddref;
4857	struct vnode *dvp;
4858
4859	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4860	    ("softdep_revert_create called on non-softdep filesystem"));
4861	dvp = ITOV(dp);
4862	ACQUIRE_LOCK(dp->i_ump);
4863	inodedep = inodedep_lookup_ip(ip);
4864	if (DOINGSUJ(dvp)) {
4865		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4866		    inoreflst);
4867		KASSERT(jaddref->ja_parent == dp->i_number,
4868		    ("softdep_revert_create: addref parent mismatch"));
4869		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4870	}
4871	FREE_LOCK(dp->i_ump);
4872}
4873
4874/*
4875 * Called to release the journal structures created by a failed link
4876 * addition.  Adjusts nlinkdelta for non-journaling softdep.
4877 */
4878void
4879softdep_revert_link(dp, ip)
4880	struct inode *dp;
4881	struct inode *ip;
4882{
4883	struct inodedep *inodedep;
4884	struct jaddref *jaddref;
4885	struct vnode *dvp;
4886
4887	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4888	    ("softdep_revert_link called on non-softdep filesystem"));
4889	dvp = ITOV(dp);
4890	ACQUIRE_LOCK(dp->i_ump);
4891	inodedep = inodedep_lookup_ip(ip);
4892	if (DOINGSUJ(dvp)) {
4893		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4894		    inoreflst);
4895		KASSERT(jaddref->ja_parent == dp->i_number,
4896		    ("softdep_revert_link: addref parent mismatch"));
4897		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4898	}
4899	FREE_LOCK(dp->i_ump);
4900}
4901
4902/*
4903 * Called to release the journal structures created by a failed mkdir
4904 * attempt.  Adjusts nlinkdelta for non-journaling softdep.
4905 */
4906void
4907softdep_revert_mkdir(dp, ip)
4908	struct inode *dp;
4909	struct inode *ip;
4910{
4911	struct inodedep *inodedep;
4912	struct jaddref *jaddref;
4913	struct jaddref *dotaddref;
4914	struct vnode *dvp;
4915
4916	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4917	    ("softdep_revert_mkdir called on non-softdep filesystem"));
4918	dvp = ITOV(dp);
4919
4920	ACQUIRE_LOCK(dp->i_ump);
4921	inodedep = inodedep_lookup_ip(dp);
4922	if (DOINGSUJ(dvp)) {
4923		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4924		    inoreflst);
4925		KASSERT(jaddref->ja_parent == ip->i_number,
4926		    ("softdep_revert_mkdir: dotdot addref parent mismatch"));
4927		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4928	}
4929	inodedep = inodedep_lookup_ip(ip);
4930	if (DOINGSUJ(dvp)) {
4931		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4932		    inoreflst);
4933		KASSERT(jaddref->ja_parent == dp->i_number,
4934		    ("softdep_revert_mkdir: addref parent mismatch"));
4935		dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
4936		    inoreflst, if_deps);
4937		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4938		KASSERT(dotaddref->ja_parent == ip->i_number,
4939		    ("softdep_revert_mkdir: dot addref parent mismatch"));
4940		cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
4941	}
4942	FREE_LOCK(dp->i_ump);
4943}
4944
4945/*
4946 * Called to correct nlinkdelta after a failed rmdir.
4947 */
4948void
4949softdep_revert_rmdir(dp, ip)
4950	struct inode *dp;
4951	struct inode *ip;
4952{
4953
4954	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4955	    ("softdep_revert_rmdir called on non-softdep filesystem"));
4956	ACQUIRE_LOCK(dp->i_ump);
4957	(void) inodedep_lookup_ip(ip);
4958	(void) inodedep_lookup_ip(dp);
4959	FREE_LOCK(dp->i_ump);
4960}
4961
4962/*
4963 * Protecting the freemaps (or bitmaps).
4964 *
4965 * To eliminate the need to execute fsck before mounting a filesystem
4966 * after a power failure, one must (conservatively) guarantee that the
4967 * on-disk copy of the bitmaps never indicate that a live inode or block is
4968 * free.  So, when a block or inode is allocated, the bitmap should be
4969 * updated (on disk) before any new pointers.  When a block or inode is
4970 * freed, the bitmap should not be updated until all pointers have been
4971 * reset.  The latter dependency is handled by the delayed de-allocation
4972 * approach described below for block and inode de-allocation.  The former
4973 * dependency is handled by calling the following procedure when a block or
4974 * inode is allocated. When an inode is allocated an "inodedep" is created
4975 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
4976 * Each "inodedep" is also inserted into the hash indexing structure so
4977 * that any additional link additions can be made dependent on the inode
4978 * allocation.
4979 *
4980 * The ufs filesystem maintains a number of free block counts (e.g., per
4981 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
4982 * in addition to the bitmaps.  These counts are used to improve efficiency
4983 * during allocation and therefore must be consistent with the bitmaps.
4984 * There is no convenient way to guarantee post-crash consistency of these
4985 * counts with simple update ordering, for two main reasons: (1) The counts
4986 * and bitmaps for a single cylinder group block are not in the same disk
4987 * sector.  If a disk write is interrupted (e.g., by power failure), one may
4988 * be written and the other not.  (2) Some of the counts are located in the
4989 * superblock rather than the cylinder group block. So, we focus our soft
4990 * updates implementation on protecting the bitmaps. When mounting a
4991 * filesystem, we recompute the auxiliary counts from the bitmaps.
4992 */
4993
4994/*
4995 * Called just after updating the cylinder group block to allocate an inode.
4996 */
4997void
4998softdep_setup_inomapdep(bp, ip, newinum, mode)
4999	struct buf *bp;		/* buffer for cylgroup block with inode map */
5000	struct inode *ip;	/* inode related to allocation */
5001	ino_t newinum;		/* new inode number being allocated */
5002	int mode;
5003{
5004	struct inodedep *inodedep;
5005	struct bmsafemap *bmsafemap;
5006	struct jaddref *jaddref;
5007	struct mount *mp;
5008	struct fs *fs;
5009
5010	mp = UFSTOVFS(ip->i_ump);
5011	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5012	    ("softdep_setup_inomapdep called on non-softdep filesystem"));
5013	fs = ip->i_ump->um_fs;
5014	jaddref = NULL;
5015
5016	/*
5017	 * Allocate the journal reference add structure so that the bitmap
5018	 * can be dependent on it.
5019	 */
5020	if (MOUNTEDSUJ(mp)) {
5021		jaddref = newjaddref(ip, newinum, 0, 0, mode);
5022		jaddref->ja_state |= NEWBLOCK;
5023	}
5024
5025	/*
5026	 * Create a dependency for the newly allocated inode.
5027	 * Panic if it already exists as something is seriously wrong.
5028	 * Otherwise add it to the dependency list for the buffer holding
5029	 * the cylinder group map from which it was allocated.
5030	 *
5031	 * We have to preallocate a bmsafemap entry in case it is needed
5032	 * in bmsafemap_lookup since once we allocate the inodedep, we
5033	 * have to finish initializing it before we can FREE_LOCK().
5034	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
5035	 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
5036	 * creating the inodedep as it can be freed during the time
5037	 * that we FREE_LOCK() while allocating the inodedep. We must
5038	 * call workitem_alloc() before entering the locked section as
5039	 * it also acquires the lock and we must avoid trying doing so
5040	 * recursively.
5041	 */
5042	bmsafemap = malloc(sizeof(struct bmsafemap),
5043	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5044	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5045	ACQUIRE_LOCK(ip->i_ump);
5046	if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep)))
5047		panic("softdep_setup_inomapdep: dependency %p for new"
5048		    "inode already exists", inodedep);
5049	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
5050	if (jaddref) {
5051		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
5052		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
5053		    if_deps);
5054	} else {
5055		inodedep->id_state |= ONDEPLIST;
5056		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
5057	}
5058	inodedep->id_bmsafemap = bmsafemap;
5059	inodedep->id_state &= ~DEPCOMPLETE;
5060	FREE_LOCK(ip->i_ump);
5061}
5062
5063/*
5064 * Called just after updating the cylinder group block to
5065 * allocate block or fragment.
5066 */
5067void
5068softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
5069	struct buf *bp;		/* buffer for cylgroup block with block map */
5070	struct mount *mp;	/* filesystem doing allocation */
5071	ufs2_daddr_t newblkno;	/* number of newly allocated block */
5072	int frags;		/* Number of fragments. */
5073	int oldfrags;		/* Previous number of fragments for extend. */
5074{
5075	struct newblk *newblk;
5076	struct bmsafemap *bmsafemap;
5077	struct jnewblk *jnewblk;
5078	struct ufsmount *ump;
5079	struct fs *fs;
5080
5081	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5082	    ("softdep_setup_blkmapdep called on non-softdep filesystem"));
5083	ump = VFSTOUFS(mp);
5084	fs = ump->um_fs;
5085	jnewblk = NULL;
5086	/*
5087	 * Create a dependency for the newly allocated block.
5088	 * Add it to the dependency list for the buffer holding
5089	 * the cylinder group map from which it was allocated.
5090	 */
5091	if (MOUNTEDSUJ(mp)) {
5092		jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
5093		workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
5094		jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
5095		jnewblk->jn_state = ATTACHED;
5096		jnewblk->jn_blkno = newblkno;
5097		jnewblk->jn_frags = frags;
5098		jnewblk->jn_oldfrags = oldfrags;
5099#ifdef SUJ_DEBUG
5100		{
5101			struct cg *cgp;
5102			uint8_t *blksfree;
5103			long bno;
5104			int i;
5105
5106			cgp = (struct cg *)bp->b_data;
5107			blksfree = cg_blksfree(cgp);
5108			bno = dtogd(fs, jnewblk->jn_blkno);
5109			for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
5110			    i++) {
5111				if (isset(blksfree, bno + i))
5112					panic("softdep_setup_blkmapdep: "
5113					    "free fragment %d from %d-%d "
5114					    "state 0x%X dep %p", i,
5115					    jnewblk->jn_oldfrags,
5116					    jnewblk->jn_frags,
5117					    jnewblk->jn_state,
5118					    jnewblk->jn_dep);
5119			}
5120		}
5121#endif
5122	}
5123
5124	CTR3(KTR_SUJ,
5125	    "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
5126	    newblkno, frags, oldfrags);
5127	ACQUIRE_LOCK(ump);
5128	if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
5129		panic("softdep_setup_blkmapdep: found block");
5130	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp,
5131	    dtog(fs, newblkno), NULL);
5132	if (jnewblk) {
5133		jnewblk->jn_dep = (struct worklist *)newblk;
5134		LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
5135	} else {
5136		newblk->nb_state |= ONDEPLIST;
5137		LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
5138	}
5139	newblk->nb_bmsafemap = bmsafemap;
5140	newblk->nb_jnewblk = jnewblk;
5141	FREE_LOCK(ump);
5142}
5143
5144#define	BMSAFEMAP_HASH(ump, cg) \
5145      (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size])
5146
5147static int
5148bmsafemap_find(bmsafemaphd, cg, bmsafemapp)
5149	struct bmsafemap_hashhead *bmsafemaphd;
5150	int cg;
5151	struct bmsafemap **bmsafemapp;
5152{
5153	struct bmsafemap *bmsafemap;
5154
5155	LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
5156		if (bmsafemap->sm_cg == cg)
5157			break;
5158	if (bmsafemap) {
5159		*bmsafemapp = bmsafemap;
5160		return (1);
5161	}
5162	*bmsafemapp = NULL;
5163
5164	return (0);
5165}
5166
5167/*
5168 * Find the bmsafemap associated with a cylinder group buffer.
5169 * If none exists, create one. The buffer must be locked when
5170 * this routine is called and this routine must be called with
5171 * the softdep lock held. To avoid giving up the lock while
5172 * allocating a new bmsafemap, a preallocated bmsafemap may be
5173 * provided. If it is provided but not needed, it is freed.
5174 */
5175static struct bmsafemap *
5176bmsafemap_lookup(mp, bp, cg, newbmsafemap)
5177	struct mount *mp;
5178	struct buf *bp;
5179	int cg;
5180	struct bmsafemap *newbmsafemap;
5181{
5182	struct bmsafemap_hashhead *bmsafemaphd;
5183	struct bmsafemap *bmsafemap, *collision;
5184	struct worklist *wk;
5185	struct ufsmount *ump;
5186
5187	ump = VFSTOUFS(mp);
5188	LOCK_OWNED(ump);
5189	KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5190	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5191		if (wk->wk_type == D_BMSAFEMAP) {
5192			if (newbmsafemap)
5193				WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5194			return (WK_BMSAFEMAP(wk));
5195		}
5196	}
5197	bmsafemaphd = BMSAFEMAP_HASH(ump, cg);
5198	if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) {
5199		if (newbmsafemap)
5200			WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5201		return (bmsafemap);
5202	}
5203	if (newbmsafemap) {
5204		bmsafemap = newbmsafemap;
5205	} else {
5206		FREE_LOCK(ump);
5207		bmsafemap = malloc(sizeof(struct bmsafemap),
5208			M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5209		workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5210		ACQUIRE_LOCK(ump);
5211	}
5212	bmsafemap->sm_buf = bp;
5213	LIST_INIT(&bmsafemap->sm_inodedephd);
5214	LIST_INIT(&bmsafemap->sm_inodedepwr);
5215	LIST_INIT(&bmsafemap->sm_newblkhd);
5216	LIST_INIT(&bmsafemap->sm_newblkwr);
5217	LIST_INIT(&bmsafemap->sm_jaddrefhd);
5218	LIST_INIT(&bmsafemap->sm_jnewblkhd);
5219	LIST_INIT(&bmsafemap->sm_freehd);
5220	LIST_INIT(&bmsafemap->sm_freewr);
5221	if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) {
5222		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5223		return (collision);
5224	}
5225	bmsafemap->sm_cg = cg;
5226	LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5227	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
5228	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5229	return (bmsafemap);
5230}
5231
5232/*
5233 * Direct block allocation dependencies.
5234 *
5235 * When a new block is allocated, the corresponding disk locations must be
5236 * initialized (with zeros or new data) before the on-disk inode points to
5237 * them.  Also, the freemap from which the block was allocated must be
5238 * updated (on disk) before the inode's pointer. These two dependencies are
5239 * independent of each other and are needed for all file blocks and indirect
5240 * blocks that are pointed to directly by the inode.  Just before the
5241 * "in-core" version of the inode is updated with a newly allocated block
5242 * number, a procedure (below) is called to setup allocation dependency
5243 * structures.  These structures are removed when the corresponding
5244 * dependencies are satisfied or when the block allocation becomes obsolete
5245 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5246 * fragment that gets upgraded).  All of these cases are handled in
5247 * procedures described later.
5248 *
5249 * When a file extension causes a fragment to be upgraded, either to a larger
5250 * fragment or to a full block, the on-disk location may change (if the
5251 * previous fragment could not simply be extended). In this case, the old
5252 * fragment must be de-allocated, but not until after the inode's pointer has
5253 * been updated. In most cases, this is handled by later procedures, which
5254 * will construct a "freefrag" structure to be added to the workitem queue
5255 * when the inode update is complete (or obsolete).  The main exception to
5256 * this is when an allocation occurs while a pending allocation dependency
5257 * (for the same block pointer) remains.  This case is handled in the main
5258 * allocation dependency setup procedure by immediately freeing the
5259 * unreferenced fragments.
5260 */
5261void
5262softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5263	struct inode *ip;	/* inode to which block is being added */
5264	ufs_lbn_t off;		/* block pointer within inode */
5265	ufs2_daddr_t newblkno;	/* disk block number being added */
5266	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
5267	long newsize;		/* size of new block */
5268	long oldsize;		/* size of new block */
5269	struct buf *bp;		/* bp for allocated block */
5270{
5271	struct allocdirect *adp, *oldadp;
5272	struct allocdirectlst *adphead;
5273	struct freefrag *freefrag;
5274	struct inodedep *inodedep;
5275	struct pagedep *pagedep;
5276	struct jnewblk *jnewblk;
5277	struct newblk *newblk;
5278	struct mount *mp;
5279	ufs_lbn_t lbn;
5280
5281	lbn = bp->b_lblkno;
5282	mp = UFSTOVFS(ip->i_ump);
5283	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5284	    ("softdep_setup_allocdirect called on non-softdep filesystem"));
5285	if (oldblkno && oldblkno != newblkno)
5286		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5287	else
5288		freefrag = NULL;
5289
5290	CTR6(KTR_SUJ,
5291	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5292	    "off %jd newsize %ld oldsize %d",
5293	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5294	ACQUIRE_LOCK(ip->i_ump);
5295	if (off >= NDADDR) {
5296		if (lbn > 0)
5297			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5298			    lbn, off);
5299		/* allocating an indirect block */
5300		if (oldblkno != 0)
5301			panic("softdep_setup_allocdirect: non-zero indir");
5302	} else {
5303		if (off != lbn)
5304			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5305			    lbn, off);
5306		/*
5307		 * Allocating a direct block.
5308		 *
5309		 * If we are allocating a directory block, then we must
5310		 * allocate an associated pagedep to track additions and
5311		 * deletions.
5312		 */
5313		if ((ip->i_mode & IFMT) == IFDIR)
5314			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5315			    &pagedep);
5316	}
5317	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5318		panic("softdep_setup_allocdirect: lost block");
5319	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5320	    ("softdep_setup_allocdirect: newblk already initialized"));
5321	/*
5322	 * Convert the newblk to an allocdirect.
5323	 */
5324	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5325	adp = (struct allocdirect *)newblk;
5326	newblk->nb_freefrag = freefrag;
5327	adp->ad_offset = off;
5328	adp->ad_oldblkno = oldblkno;
5329	adp->ad_newsize = newsize;
5330	adp->ad_oldsize = oldsize;
5331
5332	/*
5333	 * Finish initializing the journal.
5334	 */
5335	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5336		jnewblk->jn_ino = ip->i_number;
5337		jnewblk->jn_lbn = lbn;
5338		add_to_journal(&jnewblk->jn_list);
5339	}
5340	if (freefrag && freefrag->ff_jdep != NULL &&
5341	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5342		add_to_journal(freefrag->ff_jdep);
5343	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5344	adp->ad_inodedep = inodedep;
5345
5346	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5347	/*
5348	 * The list of allocdirects must be kept in sorted and ascending
5349	 * order so that the rollback routines can quickly determine the
5350	 * first uncommitted block (the size of the file stored on disk
5351	 * ends at the end of the lowest committed fragment, or if there
5352	 * are no fragments, at the end of the highest committed block).
5353	 * Since files generally grow, the typical case is that the new
5354	 * block is to be added at the end of the list. We speed this
5355	 * special case by checking against the last allocdirect in the
5356	 * list before laboriously traversing the list looking for the
5357	 * insertion point.
5358	 */
5359	adphead = &inodedep->id_newinoupdt;
5360	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5361	if (oldadp == NULL || oldadp->ad_offset <= off) {
5362		/* insert at end of list */
5363		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5364		if (oldadp != NULL && oldadp->ad_offset == off)
5365			allocdirect_merge(adphead, adp, oldadp);
5366		FREE_LOCK(ip->i_ump);
5367		return;
5368	}
5369	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5370		if (oldadp->ad_offset >= off)
5371			break;
5372	}
5373	if (oldadp == NULL)
5374		panic("softdep_setup_allocdirect: lost entry");
5375	/* insert in middle of list */
5376	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5377	if (oldadp->ad_offset == off)
5378		allocdirect_merge(adphead, adp, oldadp);
5379
5380	FREE_LOCK(ip->i_ump);
5381}
5382
5383/*
5384 * Merge a newer and older journal record to be stored either in a
5385 * newblock or freefrag.  This handles aggregating journal records for
5386 * fragment allocation into a second record as well as replacing a
5387 * journal free with an aborted journal allocation.  A segment for the
5388 * oldest record will be placed on wkhd if it has been written.  If not
5389 * the segment for the newer record will suffice.
5390 */
5391static struct worklist *
5392jnewblk_merge(new, old, wkhd)
5393	struct worklist *new;
5394	struct worklist *old;
5395	struct workhead *wkhd;
5396{
5397	struct jnewblk *njnewblk;
5398	struct jnewblk *jnewblk;
5399
5400	/* Handle NULLs to simplify callers. */
5401	if (new == NULL)
5402		return (old);
5403	if (old == NULL)
5404		return (new);
5405	/* Replace a jfreefrag with a jnewblk. */
5406	if (new->wk_type == D_JFREEFRAG) {
5407		if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5408			panic("jnewblk_merge: blkno mismatch: %p, %p",
5409			    old, new);
5410		cancel_jfreefrag(WK_JFREEFRAG(new));
5411		return (old);
5412	}
5413	if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5414		panic("jnewblk_merge: Bad type: old %d new %d\n",
5415		    old->wk_type, new->wk_type);
5416	/*
5417	 * Handle merging of two jnewblk records that describe
5418	 * different sets of fragments in the same block.
5419	 */
5420	jnewblk = WK_JNEWBLK(old);
5421	njnewblk = WK_JNEWBLK(new);
5422	if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5423		panic("jnewblk_merge: Merging disparate blocks.");
5424	/*
5425	 * The record may be rolled back in the cg.
5426	 */
5427	if (jnewblk->jn_state & UNDONE) {
5428		jnewblk->jn_state &= ~UNDONE;
5429		njnewblk->jn_state |= UNDONE;
5430		njnewblk->jn_state &= ~ATTACHED;
5431	}
5432	/*
5433	 * We modify the newer addref and free the older so that if neither
5434	 * has been written the most up-to-date copy will be on disk.  If
5435	 * both have been written but rolled back we only temporarily need
5436	 * one of them to fix the bits when the cg write completes.
5437	 */
5438	jnewblk->jn_state |= ATTACHED | COMPLETE;
5439	njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5440	cancel_jnewblk(jnewblk, wkhd);
5441	WORKLIST_REMOVE(&jnewblk->jn_list);
5442	free_jnewblk(jnewblk);
5443	return (new);
5444}
5445
5446/*
5447 * Replace an old allocdirect dependency with a newer one.
5448 * This routine must be called with splbio interrupts blocked.
5449 */
5450static void
5451allocdirect_merge(adphead, newadp, oldadp)
5452	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
5453	struct allocdirect *newadp;	/* allocdirect being added */
5454	struct allocdirect *oldadp;	/* existing allocdirect being checked */
5455{
5456	struct worklist *wk;
5457	struct freefrag *freefrag;
5458
5459	freefrag = NULL;
5460	LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp));
5461	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5462	    newadp->ad_oldsize != oldadp->ad_newsize ||
5463	    newadp->ad_offset >= NDADDR)
5464		panic("%s %jd != new %jd || old size %ld != new %ld",
5465		    "allocdirect_merge: old blkno",
5466		    (intmax_t)newadp->ad_oldblkno,
5467		    (intmax_t)oldadp->ad_newblkno,
5468		    newadp->ad_oldsize, oldadp->ad_newsize);
5469	newadp->ad_oldblkno = oldadp->ad_oldblkno;
5470	newadp->ad_oldsize = oldadp->ad_oldsize;
5471	/*
5472	 * If the old dependency had a fragment to free or had never
5473	 * previously had a block allocated, then the new dependency
5474	 * can immediately post its freefrag and adopt the old freefrag.
5475	 * This action is done by swapping the freefrag dependencies.
5476	 * The new dependency gains the old one's freefrag, and the
5477	 * old one gets the new one and then immediately puts it on
5478	 * the worklist when it is freed by free_newblk. It is
5479	 * not possible to do this swap when the old dependency had a
5480	 * non-zero size but no previous fragment to free. This condition
5481	 * arises when the new block is an extension of the old block.
5482	 * Here, the first part of the fragment allocated to the new
5483	 * dependency is part of the block currently claimed on disk by
5484	 * the old dependency, so cannot legitimately be freed until the
5485	 * conditions for the new dependency are fulfilled.
5486	 */
5487	freefrag = newadp->ad_freefrag;
5488	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5489		newadp->ad_freefrag = oldadp->ad_freefrag;
5490		oldadp->ad_freefrag = freefrag;
5491	}
5492	/*
5493	 * If we are tracking a new directory-block allocation,
5494	 * move it from the old allocdirect to the new allocdirect.
5495	 */
5496	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5497		WORKLIST_REMOVE(wk);
5498		if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5499			panic("allocdirect_merge: extra newdirblk");
5500		WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5501	}
5502	TAILQ_REMOVE(adphead, oldadp, ad_next);
5503	/*
5504	 * We need to move any journal dependencies over to the freefrag
5505	 * that releases this block if it exists.  Otherwise we are
5506	 * extending an existing block and we'll wait until that is
5507	 * complete to release the journal space and extend the
5508	 * new journal to cover this old space as well.
5509	 */
5510	if (freefrag == NULL) {
5511		if (oldadp->ad_newblkno != newadp->ad_newblkno)
5512			panic("allocdirect_merge: %jd != %jd",
5513			    oldadp->ad_newblkno, newadp->ad_newblkno);
5514		newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5515		    jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5516		    &oldadp->ad_block.nb_jnewblk->jn_list,
5517		    &newadp->ad_block.nb_jwork);
5518		oldadp->ad_block.nb_jnewblk = NULL;
5519		cancel_newblk(&oldadp->ad_block, NULL,
5520		    &newadp->ad_block.nb_jwork);
5521	} else {
5522		wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5523		    &freefrag->ff_list, &freefrag->ff_jwork);
5524		freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5525		    &freefrag->ff_jwork);
5526	}
5527	free_newblk(&oldadp->ad_block);
5528}
5529
5530/*
5531 * Allocate a jfreefrag structure to journal a single block free.
5532 */
5533static struct jfreefrag *
5534newjfreefrag(freefrag, ip, blkno, size, lbn)
5535	struct freefrag *freefrag;
5536	struct inode *ip;
5537	ufs2_daddr_t blkno;
5538	long size;
5539	ufs_lbn_t lbn;
5540{
5541	struct jfreefrag *jfreefrag;
5542	struct fs *fs;
5543
5544	fs = ip->i_fs;
5545	jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5546	    M_SOFTDEP_FLAGS);
5547	workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump));
5548	jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5549	jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5550	jfreefrag->fr_ino = ip->i_number;
5551	jfreefrag->fr_lbn = lbn;
5552	jfreefrag->fr_blkno = blkno;
5553	jfreefrag->fr_frags = numfrags(fs, size);
5554	jfreefrag->fr_freefrag = freefrag;
5555
5556	return (jfreefrag);
5557}
5558
5559/*
5560 * Allocate a new freefrag structure.
5561 */
5562static struct freefrag *
5563newfreefrag(ip, blkno, size, lbn)
5564	struct inode *ip;
5565	ufs2_daddr_t blkno;
5566	long size;
5567	ufs_lbn_t lbn;
5568{
5569	struct freefrag *freefrag;
5570	struct fs *fs;
5571
5572	CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5573	    ip->i_number, blkno, size, lbn);
5574	fs = ip->i_fs;
5575	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5576		panic("newfreefrag: frag size");
5577	freefrag = malloc(sizeof(struct freefrag),
5578	    M_FREEFRAG, M_SOFTDEP_FLAGS);
5579	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
5580	freefrag->ff_state = ATTACHED;
5581	LIST_INIT(&freefrag->ff_jwork);
5582	freefrag->ff_inum = ip->i_number;
5583	freefrag->ff_vtype = ITOV(ip)->v_type;
5584	freefrag->ff_blkno = blkno;
5585	freefrag->ff_fragsize = size;
5586
5587	if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) {
5588		freefrag->ff_jdep = (struct worklist *)
5589		    newjfreefrag(freefrag, ip, blkno, size, lbn);
5590	} else {
5591		freefrag->ff_state |= DEPCOMPLETE;
5592		freefrag->ff_jdep = NULL;
5593	}
5594
5595	return (freefrag);
5596}
5597
5598/*
5599 * This workitem de-allocates fragments that were replaced during
5600 * file block allocation.
5601 */
5602static void
5603handle_workitem_freefrag(freefrag)
5604	struct freefrag *freefrag;
5605{
5606	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5607	struct workhead wkhd;
5608
5609	CTR3(KTR_SUJ,
5610	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5611	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5612	/*
5613	 * It would be illegal to add new completion items to the
5614	 * freefrag after it was schedule to be done so it must be
5615	 * safe to modify the list head here.
5616	 */
5617	LIST_INIT(&wkhd);
5618	ACQUIRE_LOCK(ump);
5619	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5620	/*
5621	 * If the journal has not been written we must cancel it here.
5622	 */
5623	if (freefrag->ff_jdep) {
5624		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5625			panic("handle_workitem_freefrag: Unexpected type %d\n",
5626			    freefrag->ff_jdep->wk_type);
5627		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5628	}
5629	FREE_LOCK(ump);
5630	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5631	   freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5632	ACQUIRE_LOCK(ump);
5633	WORKITEM_FREE(freefrag, D_FREEFRAG);
5634	FREE_LOCK(ump);
5635}
5636
5637/*
5638 * Set up a dependency structure for an external attributes data block.
5639 * This routine follows much of the structure of softdep_setup_allocdirect.
5640 * See the description of softdep_setup_allocdirect above for details.
5641 */
5642void
5643softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5644	struct inode *ip;
5645	ufs_lbn_t off;
5646	ufs2_daddr_t newblkno;
5647	ufs2_daddr_t oldblkno;
5648	long newsize;
5649	long oldsize;
5650	struct buf *bp;
5651{
5652	struct allocdirect *adp, *oldadp;
5653	struct allocdirectlst *adphead;
5654	struct freefrag *freefrag;
5655	struct inodedep *inodedep;
5656	struct jnewblk *jnewblk;
5657	struct newblk *newblk;
5658	struct mount *mp;
5659	ufs_lbn_t lbn;
5660
5661	mp = UFSTOVFS(ip->i_ump);
5662	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5663	    ("softdep_setup_allocext called on non-softdep filesystem"));
5664	KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld > NXADDR",
5665		    (long long)off));
5666
5667	lbn = bp->b_lblkno;
5668	if (oldblkno && oldblkno != newblkno)
5669		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5670	else
5671		freefrag = NULL;
5672
5673	ACQUIRE_LOCK(ip->i_ump);
5674	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5675		panic("softdep_setup_allocext: lost block");
5676	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5677	    ("softdep_setup_allocext: newblk already initialized"));
5678	/*
5679	 * Convert the newblk to an allocdirect.
5680	 */
5681	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5682	adp = (struct allocdirect *)newblk;
5683	newblk->nb_freefrag = freefrag;
5684	adp->ad_offset = off;
5685	adp->ad_oldblkno = oldblkno;
5686	adp->ad_newsize = newsize;
5687	adp->ad_oldsize = oldsize;
5688	adp->ad_state |=  EXTDATA;
5689
5690	/*
5691	 * Finish initializing the journal.
5692	 */
5693	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5694		jnewblk->jn_ino = ip->i_number;
5695		jnewblk->jn_lbn = lbn;
5696		add_to_journal(&jnewblk->jn_list);
5697	}
5698	if (freefrag && freefrag->ff_jdep != NULL &&
5699	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5700		add_to_journal(freefrag->ff_jdep);
5701	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5702	adp->ad_inodedep = inodedep;
5703
5704	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5705	/*
5706	 * The list of allocdirects must be kept in sorted and ascending
5707	 * order so that the rollback routines can quickly determine the
5708	 * first uncommitted block (the size of the file stored on disk
5709	 * ends at the end of the lowest committed fragment, or if there
5710	 * are no fragments, at the end of the highest committed block).
5711	 * Since files generally grow, the typical case is that the new
5712	 * block is to be added at the end of the list. We speed this
5713	 * special case by checking against the last allocdirect in the
5714	 * list before laboriously traversing the list looking for the
5715	 * insertion point.
5716	 */
5717	adphead = &inodedep->id_newextupdt;
5718	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5719	if (oldadp == NULL || oldadp->ad_offset <= off) {
5720		/* insert at end of list */
5721		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5722		if (oldadp != NULL && oldadp->ad_offset == off)
5723			allocdirect_merge(adphead, adp, oldadp);
5724		FREE_LOCK(ip->i_ump);
5725		return;
5726	}
5727	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5728		if (oldadp->ad_offset >= off)
5729			break;
5730	}
5731	if (oldadp == NULL)
5732		panic("softdep_setup_allocext: lost entry");
5733	/* insert in middle of list */
5734	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5735	if (oldadp->ad_offset == off)
5736		allocdirect_merge(adphead, adp, oldadp);
5737	FREE_LOCK(ip->i_ump);
5738}
5739
5740/*
5741 * Indirect block allocation dependencies.
5742 *
5743 * The same dependencies that exist for a direct block also exist when
5744 * a new block is allocated and pointed to by an entry in a block of
5745 * indirect pointers. The undo/redo states described above are also
5746 * used here. Because an indirect block contains many pointers that
5747 * may have dependencies, a second copy of the entire in-memory indirect
5748 * block is kept. The buffer cache copy is always completely up-to-date.
5749 * The second copy, which is used only as a source for disk writes,
5750 * contains only the safe pointers (i.e., those that have no remaining
5751 * update dependencies). The second copy is freed when all pointers
5752 * are safe. The cache is not allowed to replace indirect blocks with
5753 * pending update dependencies. If a buffer containing an indirect
5754 * block with dependencies is written, these routines will mark it
5755 * dirty again. It can only be successfully written once all the
5756 * dependencies are removed. The ffs_fsync routine in conjunction with
5757 * softdep_sync_metadata work together to get all the dependencies
5758 * removed so that a file can be successfully written to disk. Three
5759 * procedures are used when setting up indirect block pointer
5760 * dependencies. The division is necessary because of the organization
5761 * of the "balloc" routine and because of the distinction between file
5762 * pages and file metadata blocks.
5763 */
5764
5765/*
5766 * Allocate a new allocindir structure.
5767 */
5768static struct allocindir *
5769newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
5770	struct inode *ip;	/* inode for file being extended */
5771	int ptrno;		/* offset of pointer in indirect block */
5772	ufs2_daddr_t newblkno;	/* disk block number being added */
5773	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5774	ufs_lbn_t lbn;
5775{
5776	struct newblk *newblk;
5777	struct allocindir *aip;
5778	struct freefrag *freefrag;
5779	struct jnewblk *jnewblk;
5780
5781	if (oldblkno)
5782		freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn);
5783	else
5784		freefrag = NULL;
5785	ACQUIRE_LOCK(ip->i_ump);
5786	if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0)
5787		panic("new_allocindir: lost block");
5788	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5789	    ("newallocindir: newblk already initialized"));
5790	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
5791	newblk->nb_freefrag = freefrag;
5792	aip = (struct allocindir *)newblk;
5793	aip->ai_offset = ptrno;
5794	aip->ai_oldblkno = oldblkno;
5795	aip->ai_lbn = lbn;
5796	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5797		jnewblk->jn_ino = ip->i_number;
5798		jnewblk->jn_lbn = lbn;
5799		add_to_journal(&jnewblk->jn_list);
5800	}
5801	if (freefrag && freefrag->ff_jdep != NULL &&
5802	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5803		add_to_journal(freefrag->ff_jdep);
5804	return (aip);
5805}
5806
5807/*
5808 * Called just before setting an indirect block pointer
5809 * to a newly allocated file page.
5810 */
5811void
5812softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
5813	struct inode *ip;	/* inode for file being extended */
5814	ufs_lbn_t lbn;		/* allocated block number within file */
5815	struct buf *bp;		/* buffer with indirect blk referencing page */
5816	int ptrno;		/* offset of pointer in indirect block */
5817	ufs2_daddr_t newblkno;	/* disk block number being added */
5818	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5819	struct buf *nbp;	/* buffer holding allocated page */
5820{
5821	struct inodedep *inodedep;
5822	struct freefrag *freefrag;
5823	struct allocindir *aip;
5824	struct pagedep *pagedep;
5825	struct mount *mp;
5826
5827	mp = UFSTOVFS(ip->i_ump);
5828	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5829	    ("softdep_setup_allocindir_page called on non-softdep filesystem"));
5830	KASSERT(lbn == nbp->b_lblkno,
5831	    ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
5832	    lbn, bp->b_lblkno));
5833	CTR4(KTR_SUJ,
5834	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
5835	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
5836	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
5837	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
5838	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5839	/*
5840	 * If we are allocating a directory page, then we must
5841	 * allocate an associated pagedep to track additions and
5842	 * deletions.
5843	 */
5844	if ((ip->i_mode & IFMT) == IFDIR)
5845		pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
5846	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5847	freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
5848	FREE_LOCK(ip->i_ump);
5849	if (freefrag)
5850		handle_workitem_freefrag(freefrag);
5851}
5852
5853/*
5854 * Called just before setting an indirect block pointer to a
5855 * newly allocated indirect block.
5856 */
5857void
5858softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
5859	struct buf *nbp;	/* newly allocated indirect block */
5860	struct inode *ip;	/* inode for file being extended */
5861	struct buf *bp;		/* indirect block referencing allocated block */
5862	int ptrno;		/* offset of pointer in indirect block */
5863	ufs2_daddr_t newblkno;	/* disk block number being added */
5864{
5865	struct inodedep *inodedep;
5866	struct allocindir *aip;
5867	ufs_lbn_t lbn;
5868
5869	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
5870	    ("softdep_setup_allocindir_meta called on non-softdep filesystem"));
5871	CTR3(KTR_SUJ,
5872	    "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
5873	    ip->i_number, newblkno, ptrno);
5874	lbn = nbp->b_lblkno;
5875	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
5876	aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
5877	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
5878	    &inodedep);
5879	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5880	if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
5881		panic("softdep_setup_allocindir_meta: Block already existed");
5882	FREE_LOCK(ip->i_ump);
5883}
5884
5885static void
5886indirdep_complete(indirdep)
5887	struct indirdep *indirdep;
5888{
5889	struct allocindir *aip;
5890
5891	LIST_REMOVE(indirdep, ir_next);
5892	indirdep->ir_state |= DEPCOMPLETE;
5893
5894	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
5895		LIST_REMOVE(aip, ai_next);
5896		free_newblk(&aip->ai_block);
5897	}
5898	/*
5899	 * If this indirdep is not attached to a buf it was simply waiting
5900	 * on completion to clear completehd.  free_indirdep() asserts
5901	 * that nothing is dangling.
5902	 */
5903	if ((indirdep->ir_state & ONWORKLIST) == 0)
5904		free_indirdep(indirdep);
5905}
5906
5907static struct indirdep *
5908indirdep_lookup(mp, ip, bp)
5909	struct mount *mp;
5910	struct inode *ip;
5911	struct buf *bp;
5912{
5913	struct indirdep *indirdep, *newindirdep;
5914	struct newblk *newblk;
5915	struct ufsmount *ump;
5916	struct worklist *wk;
5917	struct fs *fs;
5918	ufs2_daddr_t blkno;
5919
5920	ump = VFSTOUFS(mp);
5921	LOCK_OWNED(ump);
5922	indirdep = NULL;
5923	newindirdep = NULL;
5924	fs = ip->i_fs;
5925	for (;;) {
5926		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5927			if (wk->wk_type != D_INDIRDEP)
5928				continue;
5929			indirdep = WK_INDIRDEP(wk);
5930			break;
5931		}
5932		/* Found on the buffer worklist, no new structure to free. */
5933		if (indirdep != NULL && newindirdep == NULL)
5934			return (indirdep);
5935		if (indirdep != NULL && newindirdep != NULL)
5936			panic("indirdep_lookup: simultaneous create");
5937		/* None found on the buffer and a new structure is ready. */
5938		if (indirdep == NULL && newindirdep != NULL)
5939			break;
5940		/* None found and no new structure available. */
5941		FREE_LOCK(ump);
5942		newindirdep = malloc(sizeof(struct indirdep),
5943		    M_INDIRDEP, M_SOFTDEP_FLAGS);
5944		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
5945		newindirdep->ir_state = ATTACHED;
5946		if (ip->i_ump->um_fstype == UFS1)
5947			newindirdep->ir_state |= UFS1FMT;
5948		TAILQ_INIT(&newindirdep->ir_trunc);
5949		newindirdep->ir_saveddata = NULL;
5950		LIST_INIT(&newindirdep->ir_deplisthd);
5951		LIST_INIT(&newindirdep->ir_donehd);
5952		LIST_INIT(&newindirdep->ir_writehd);
5953		LIST_INIT(&newindirdep->ir_completehd);
5954		if (bp->b_blkno == bp->b_lblkno) {
5955			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
5956			    NULL, NULL);
5957			bp->b_blkno = blkno;
5958		}
5959		newindirdep->ir_freeblks = NULL;
5960		newindirdep->ir_savebp =
5961		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
5962		newindirdep->ir_bp = bp;
5963		BUF_KERNPROC(newindirdep->ir_savebp);
5964		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
5965		ACQUIRE_LOCK(ump);
5966	}
5967	indirdep = newindirdep;
5968	WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
5969	/*
5970	 * If the block is not yet allocated we don't set DEPCOMPLETE so
5971	 * that we don't free dependencies until the pointers are valid.
5972	 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
5973	 * than using the hash.
5974	 */
5975	if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
5976		LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
5977	else
5978		indirdep->ir_state |= DEPCOMPLETE;
5979	return (indirdep);
5980}
5981
5982/*
5983 * Called to finish the allocation of the "aip" allocated
5984 * by one of the two routines above.
5985 */
5986static struct freefrag *
5987setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
5988	struct buf *bp;		/* in-memory copy of the indirect block */
5989	struct inode *ip;	/* inode for file being extended */
5990	struct inodedep *inodedep; /* Inodedep for ip */
5991	struct allocindir *aip;	/* allocindir allocated by the above routines */
5992	ufs_lbn_t lbn;		/* Logical block number for this block. */
5993{
5994	struct fs *fs;
5995	struct indirdep *indirdep;
5996	struct allocindir *oldaip;
5997	struct freefrag *freefrag;
5998	struct mount *mp;
5999
6000	LOCK_OWNED(ip->i_ump);
6001	mp = UFSTOVFS(ip->i_ump);
6002	fs = ip->i_fs;
6003	if (bp->b_lblkno >= 0)
6004		panic("setup_allocindir_phase2: not indir blk");
6005	KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
6006	    ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
6007	indirdep = indirdep_lookup(mp, ip, bp);
6008	KASSERT(indirdep->ir_savebp != NULL,
6009	    ("setup_allocindir_phase2 NULL ir_savebp"));
6010	aip->ai_indirdep = indirdep;
6011	/*
6012	 * Check for an unwritten dependency for this indirect offset.  If
6013	 * there is, merge the old dependency into the new one.  This happens
6014	 * as a result of reallocblk only.
6015	 */
6016	freefrag = NULL;
6017	if (aip->ai_oldblkno != 0) {
6018		LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
6019			if (oldaip->ai_offset == aip->ai_offset) {
6020				freefrag = allocindir_merge(aip, oldaip);
6021				goto done;
6022			}
6023		}
6024		LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
6025			if (oldaip->ai_offset == aip->ai_offset) {
6026				freefrag = allocindir_merge(aip, oldaip);
6027				goto done;
6028			}
6029		}
6030	}
6031done:
6032	LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
6033	return (freefrag);
6034}
6035
6036/*
6037 * Merge two allocindirs which refer to the same block.  Move newblock
6038 * dependencies and setup the freefrags appropriately.
6039 */
6040static struct freefrag *
6041allocindir_merge(aip, oldaip)
6042	struct allocindir *aip;
6043	struct allocindir *oldaip;
6044{
6045	struct freefrag *freefrag;
6046	struct worklist *wk;
6047
6048	if (oldaip->ai_newblkno != aip->ai_oldblkno)
6049		panic("allocindir_merge: blkno");
6050	aip->ai_oldblkno = oldaip->ai_oldblkno;
6051	freefrag = aip->ai_freefrag;
6052	aip->ai_freefrag = oldaip->ai_freefrag;
6053	oldaip->ai_freefrag = NULL;
6054	KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag"));
6055	/*
6056	 * If we are tracking a new directory-block allocation,
6057	 * move it from the old allocindir to the new allocindir.
6058	 */
6059	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
6060		WORKLIST_REMOVE(wk);
6061		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
6062			panic("allocindir_merge: extra newdirblk");
6063		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
6064	}
6065	/*
6066	 * We can skip journaling for this freefrag and just complete
6067	 * any pending journal work for the allocindir that is being
6068	 * removed after the freefrag completes.
6069	 */
6070	if (freefrag->ff_jdep)
6071		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
6072	LIST_REMOVE(oldaip, ai_next);
6073	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
6074	    &freefrag->ff_list, &freefrag->ff_jwork);
6075	free_newblk(&oldaip->ai_block);
6076
6077	return (freefrag);
6078}
6079
6080static inline void
6081setup_freedirect(freeblks, ip, i, needj)
6082	struct freeblks *freeblks;
6083	struct inode *ip;
6084	int i;
6085	int needj;
6086{
6087	ufs2_daddr_t blkno;
6088	int frags;
6089
6090	blkno = DIP(ip, i_db[i]);
6091	if (blkno == 0)
6092		return;
6093	DIP_SET(ip, i_db[i], 0);
6094	frags = sblksize(ip->i_fs, ip->i_size, i);
6095	frags = numfrags(ip->i_fs, frags);
6096	newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj);
6097}
6098
6099static inline void
6100setup_freeext(freeblks, ip, i, needj)
6101	struct freeblks *freeblks;
6102	struct inode *ip;
6103	int i;
6104	int needj;
6105{
6106	ufs2_daddr_t blkno;
6107	int frags;
6108
6109	blkno = ip->i_din2->di_extb[i];
6110	if (blkno == 0)
6111		return;
6112	ip->i_din2->di_extb[i] = 0;
6113	frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i);
6114	frags = numfrags(ip->i_fs, frags);
6115	newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
6116}
6117
6118static inline void
6119setup_freeindir(freeblks, ip, i, lbn, needj)
6120	struct freeblks *freeblks;
6121	struct inode *ip;
6122	int i;
6123	ufs_lbn_t lbn;
6124	int needj;
6125{
6126	ufs2_daddr_t blkno;
6127
6128	blkno = DIP(ip, i_ib[i]);
6129	if (blkno == 0)
6130		return;
6131	DIP_SET(ip, i_ib[i], 0);
6132	newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag,
6133	    0, needj);
6134}
6135
6136static inline struct freeblks *
6137newfreeblks(mp, ip)
6138	struct mount *mp;
6139	struct inode *ip;
6140{
6141	struct freeblks *freeblks;
6142
6143	freeblks = malloc(sizeof(struct freeblks),
6144		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
6145	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
6146	LIST_INIT(&freeblks->fb_jblkdephd);
6147	LIST_INIT(&freeblks->fb_jwork);
6148	freeblks->fb_ref = 0;
6149	freeblks->fb_cgwait = 0;
6150	freeblks->fb_state = ATTACHED;
6151	freeblks->fb_uid = ip->i_uid;
6152	freeblks->fb_inum = ip->i_number;
6153	freeblks->fb_vtype = ITOV(ip)->v_type;
6154	freeblks->fb_modrev = DIP(ip, i_modrev);
6155	freeblks->fb_devvp = ip->i_devvp;
6156	freeblks->fb_chkcnt = 0;
6157	freeblks->fb_len = 0;
6158
6159	return (freeblks);
6160}
6161
6162static void
6163trunc_indirdep(indirdep, freeblks, bp, off)
6164	struct indirdep *indirdep;
6165	struct freeblks *freeblks;
6166	struct buf *bp;
6167	int off;
6168{
6169	struct allocindir *aip, *aipn;
6170
6171	/*
6172	 * The first set of allocindirs won't be in savedbp.
6173	 */
6174	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6175		if (aip->ai_offset > off)
6176			cancel_allocindir(aip, bp, freeblks, 1);
6177	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6178		if (aip->ai_offset > off)
6179			cancel_allocindir(aip, bp, freeblks, 1);
6180	/*
6181	 * These will exist in savedbp.
6182	 */
6183	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6184		if (aip->ai_offset > off)
6185			cancel_allocindir(aip, NULL, freeblks, 0);
6186	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6187		if (aip->ai_offset > off)
6188			cancel_allocindir(aip, NULL, freeblks, 0);
6189}
6190
6191/*
6192 * Follow the chain of indirects down to lastlbn creating a freework
6193 * structure for each.  This will be used to start indir_trunc() at
6194 * the right offset and create the journal records for the parrtial
6195 * truncation.  A second step will handle the truncated dependencies.
6196 */
6197static int
6198setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
6199	struct freeblks *freeblks;
6200	struct inode *ip;
6201	ufs_lbn_t lbn;
6202	ufs_lbn_t lastlbn;
6203	ufs2_daddr_t blkno;
6204{
6205	struct indirdep *indirdep;
6206	struct indirdep *indirn;
6207	struct freework *freework;
6208	struct newblk *newblk;
6209	struct mount *mp;
6210	struct buf *bp;
6211	uint8_t *start;
6212	uint8_t *end;
6213	ufs_lbn_t lbnadd;
6214	int level;
6215	int error;
6216	int off;
6217
6218
6219	freework = NULL;
6220	if (blkno == 0)
6221		return (0);
6222	mp = freeblks->fb_list.wk_mp;
6223	bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
6224	if ((bp->b_flags & B_CACHE) == 0) {
6225		bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
6226		bp->b_iocmd = BIO_READ;
6227		bp->b_flags &= ~B_INVAL;
6228		bp->b_ioflags &= ~BIO_ERROR;
6229		vfs_busy_pages(bp, 0);
6230		bp->b_iooffset = dbtob(bp->b_blkno);
6231		bstrategy(bp);
6232		curthread->td_ru.ru_inblock++;
6233		error = bufwait(bp);
6234		if (error) {
6235			brelse(bp);
6236			return (error);
6237		}
6238	}
6239	level = lbn_level(lbn);
6240	lbnadd = lbn_offset(ip->i_fs, level);
6241	/*
6242	 * Compute the offset of the last block we want to keep.  Store
6243	 * in the freework the first block we want to completely free.
6244	 */
6245	off = (lastlbn - -(lbn + level)) / lbnadd;
6246	if (off + 1 == NINDIR(ip->i_fs))
6247		goto nowork;
6248	freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
6249	    0);
6250	/*
6251	 * Link the freework into the indirdep.  This will prevent any new
6252	 * allocations from proceeding until we are finished with the
6253	 * truncate and the block is written.
6254	 */
6255	ACQUIRE_LOCK(ip->i_ump);
6256	indirdep = indirdep_lookup(mp, ip, bp);
6257	if (indirdep->ir_freeblks)
6258		panic("setup_trunc_indir: indirdep already truncated.");
6259	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6260	freework->fw_indir = indirdep;
6261	/*
6262	 * Cancel any allocindirs that will not make it to disk.
6263	 * We have to do this for all copies of the indirdep that
6264	 * live on this newblk.
6265	 */
6266	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6267		newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
6268		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6269			trunc_indirdep(indirn, freeblks, bp, off);
6270	} else
6271		trunc_indirdep(indirdep, freeblks, bp, off);
6272	FREE_LOCK(ip->i_ump);
6273	/*
6274	 * Creation is protected by the buf lock. The saveddata is only
6275	 * needed if a full truncation follows a partial truncation but it
6276	 * is difficult to allocate in that case so we fetch it anyway.
6277	 */
6278	if (indirdep->ir_saveddata == NULL)
6279		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6280		    M_SOFTDEP_FLAGS);
6281nowork:
6282	/* Fetch the blkno of the child and the zero start offset. */
6283	if (ip->i_ump->um_fstype == UFS1) {
6284		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6285		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6286	} else {
6287		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6288		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6289	}
6290	if (freework) {
6291		/* Zero the truncated pointers. */
6292		end = bp->b_data + bp->b_bcount;
6293		bzero(start, end - start);
6294		bdwrite(bp);
6295	} else
6296		bqrelse(bp);
6297	if (level == 0)
6298		return (0);
6299	lbn++; /* adjust level */
6300	lbn -= (off * lbnadd);
6301	return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
6302}
6303
6304/*
6305 * Complete the partial truncation of an indirect block setup by
6306 * setup_trunc_indir().  This zeros the truncated pointers in the saved
6307 * copy and writes them to disk before the freeblks is allowed to complete.
6308 */
6309static void
6310complete_trunc_indir(freework)
6311	struct freework *freework;
6312{
6313	struct freework *fwn;
6314	struct indirdep *indirdep;
6315	struct ufsmount *ump;
6316	struct buf *bp;
6317	uintptr_t start;
6318	int count;
6319
6320	ump = VFSTOUFS(freework->fw_list.wk_mp);
6321	LOCK_OWNED(ump);
6322	indirdep = freework->fw_indir;
6323	for (;;) {
6324		bp = indirdep->ir_bp;
6325		/* See if the block was discarded. */
6326		if (bp == NULL)
6327			break;
6328		/* Inline part of getdirtybuf().  We dont want bremfree. */
6329		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6330			break;
6331		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6332		    LOCK_PTR(ump)) == 0)
6333			BUF_UNLOCK(bp);
6334		ACQUIRE_LOCK(ump);
6335	}
6336	freework->fw_state |= DEPCOMPLETE;
6337	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6338	/*
6339	 * Zero the pointers in the saved copy.
6340	 */
6341	if (indirdep->ir_state & UFS1FMT)
6342		start = sizeof(ufs1_daddr_t);
6343	else
6344		start = sizeof(ufs2_daddr_t);
6345	start *= freework->fw_start;
6346	count = indirdep->ir_savebp->b_bcount - start;
6347	start += (uintptr_t)indirdep->ir_savebp->b_data;
6348	bzero((char *)start, count);
6349	/*
6350	 * We need to start the next truncation in the list if it has not
6351	 * been started yet.
6352	 */
6353	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6354	if (fwn != NULL) {
6355		if (fwn->fw_freeblks == indirdep->ir_freeblks)
6356			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6357		if ((fwn->fw_state & ONWORKLIST) == 0)
6358			freework_enqueue(fwn);
6359	}
6360	/*
6361	 * If bp is NULL the block was fully truncated, restore
6362	 * the saved block list otherwise free it if it is no
6363	 * longer needed.
6364	 */
6365	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6366		if (bp == NULL)
6367			bcopy(indirdep->ir_saveddata,
6368			    indirdep->ir_savebp->b_data,
6369			    indirdep->ir_savebp->b_bcount);
6370		free(indirdep->ir_saveddata, M_INDIRDEP);
6371		indirdep->ir_saveddata = NULL;
6372	}
6373	/*
6374	 * When bp is NULL there is a full truncation pending.  We
6375	 * must wait for this full truncation to be journaled before
6376	 * we can release this freework because the disk pointers will
6377	 * never be written as zero.
6378	 */
6379	if (bp == NULL)  {
6380		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6381			handle_written_freework(freework);
6382		else
6383			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6384			   &freework->fw_list);
6385	} else {
6386		/* Complete when the real copy is written. */
6387		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6388		BUF_UNLOCK(bp);
6389	}
6390}
6391
6392/*
6393 * Calculate the number of blocks we are going to release where datablocks
6394 * is the current total and length is the new file size.
6395 */
6396static ufs2_daddr_t
6397blkcount(fs, datablocks, length)
6398	struct fs *fs;
6399	ufs2_daddr_t datablocks;
6400	off_t length;
6401{
6402	off_t totblks, numblks;
6403
6404	totblks = 0;
6405	numblks = howmany(length, fs->fs_bsize);
6406	if (numblks <= NDADDR) {
6407		totblks = howmany(length, fs->fs_fsize);
6408		goto out;
6409	}
6410        totblks = blkstofrags(fs, numblks);
6411	numblks -= NDADDR;
6412	/*
6413	 * Count all single, then double, then triple indirects required.
6414	 * Subtracting one indirects worth of blocks for each pass
6415	 * acknowledges one of each pointed to by the inode.
6416	 */
6417	for (;;) {
6418		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6419		numblks -= NINDIR(fs);
6420		if (numblks <= 0)
6421			break;
6422		numblks = howmany(numblks, NINDIR(fs));
6423	}
6424out:
6425	totblks = fsbtodb(fs, totblks);
6426	/*
6427	 * Handle sparse files.  We can't reclaim more blocks than the inode
6428	 * references.  We will correct it later in handle_complete_freeblks()
6429	 * when we know the real count.
6430	 */
6431	if (totblks > datablocks)
6432		return (0);
6433	return (datablocks - totblks);
6434}
6435
6436/*
6437 * Handle freeblocks for journaled softupdate filesystems.
6438 *
6439 * Contrary to normal softupdates, we must preserve the block pointers in
6440 * indirects until their subordinates are free.  This is to avoid journaling
6441 * every block that is freed which may consume more space than the journal
6442 * itself.  The recovery program will see the free block journals at the
6443 * base of the truncated area and traverse them to reclaim space.  The
6444 * pointers in the inode may be cleared immediately after the journal
6445 * records are written because each direct and indirect pointer in the
6446 * inode is recorded in a journal.  This permits full truncation to proceed
6447 * asynchronously.  The write order is journal -> inode -> cgs -> indirects.
6448 *
6449 * The algorithm is as follows:
6450 * 1) Traverse the in-memory state and create journal entries to release
6451 *    the relevant blocks and full indirect trees.
6452 * 2) Traverse the indirect block chain adding partial truncation freework
6453 *    records to indirects in the path to lastlbn.  The freework will
6454 *    prevent new allocation dependencies from being satisfied in this
6455 *    indirect until the truncation completes.
6456 * 3) Read and lock the inode block, performing an update with the new size
6457 *    and pointers.  This prevents truncated data from becoming valid on
6458 *    disk through step 4.
6459 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6460 *    eliminate journal work for those records that do not require it.
6461 * 5) Schedule the journal records to be written followed by the inode block.
6462 * 6) Allocate any necessary frags for the end of file.
6463 * 7) Zero any partially truncated blocks.
6464 *
6465 * From this truncation proceeds asynchronously using the freework and
6466 * indir_trunc machinery.  The file will not be extended again into a
6467 * partially truncated indirect block until all work is completed but
6468 * the normal dependency mechanism ensures that it is rolled back/forward
6469 * as appropriate.  Further truncation may occur without delay and is
6470 * serialized in indir_trunc().
6471 */
6472void
6473softdep_journal_freeblocks(ip, cred, length, flags)
6474	struct inode *ip;	/* The inode whose length is to be reduced */
6475	struct ucred *cred;
6476	off_t length;		/* The new length for the file */
6477	int flags;		/* IO_EXT and/or IO_NORMAL */
6478{
6479	struct freeblks *freeblks, *fbn;
6480	struct worklist *wk, *wkn;
6481	struct inodedep *inodedep;
6482	struct jblkdep *jblkdep;
6483	struct allocdirect *adp, *adpn;
6484	struct ufsmount *ump;
6485	struct fs *fs;
6486	struct buf *bp;
6487	struct vnode *vp;
6488	struct mount *mp;
6489	ufs2_daddr_t extblocks, datablocks;
6490	ufs_lbn_t tmpval, lbn, lastlbn;
6491	int frags, lastoff, iboff, allocblock, needj, error, i;
6492
6493	fs = ip->i_fs;
6494	ump = ip->i_ump;
6495	mp = UFSTOVFS(ump);
6496	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6497	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
6498	vp = ITOV(ip);
6499	needj = 1;
6500	iboff = -1;
6501	allocblock = 0;
6502	extblocks = 0;
6503	datablocks = 0;
6504	frags = 0;
6505	freeblks = newfreeblks(mp, ip);
6506	ACQUIRE_LOCK(ump);
6507	/*
6508	 * If we're truncating a removed file that will never be written
6509	 * we don't need to journal the block frees.  The canceled journals
6510	 * for the allocations will suffice.
6511	 */
6512	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6513	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6514	    length == 0)
6515		needj = 0;
6516	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6517	    ip->i_number, length, needj);
6518	FREE_LOCK(ump);
6519	/*
6520	 * Calculate the lbn that we are truncating to.  This results in -1
6521	 * if we're truncating the 0 bytes.  So it is the last lbn we want
6522	 * to keep, not the first lbn we want to truncate.
6523	 */
6524	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6525	lastoff = blkoff(fs, length);
6526	/*
6527	 * Compute frags we are keeping in lastlbn.  0 means all.
6528	 */
6529	if (lastlbn >= 0 && lastlbn < NDADDR) {
6530		frags = fragroundup(fs, lastoff);
6531		/* adp offset of last valid allocdirect. */
6532		iboff = lastlbn;
6533	} else if (lastlbn > 0)
6534		iboff = NDADDR;
6535	if (fs->fs_magic == FS_UFS2_MAGIC)
6536		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6537	/*
6538	 * Handle normal data blocks and indirects.  This section saves
6539	 * values used after the inode update to complete frag and indirect
6540	 * truncation.
6541	 */
6542	if ((flags & IO_NORMAL) != 0) {
6543		/*
6544		 * Handle truncation of whole direct and indirect blocks.
6545		 */
6546		for (i = iboff + 1; i < NDADDR; i++)
6547			setup_freedirect(freeblks, ip, i, needj);
6548		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6549		    i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6550			/* Release a whole indirect tree. */
6551			if (lbn > lastlbn) {
6552				setup_freeindir(freeblks, ip, i, -lbn -i,
6553				    needj);
6554				continue;
6555			}
6556			iboff = i + NDADDR;
6557			/*
6558			 * Traverse partially truncated indirect tree.
6559			 */
6560			if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6561				setup_trunc_indir(freeblks, ip, -lbn - i,
6562				    lastlbn, DIP(ip, i_ib[i]));
6563		}
6564		/*
6565		 * Handle partial truncation to a frag boundary.
6566		 */
6567		if (frags) {
6568			ufs2_daddr_t blkno;
6569			long oldfrags;
6570
6571			oldfrags = blksize(fs, ip, lastlbn);
6572			blkno = DIP(ip, i_db[lastlbn]);
6573			if (blkno && oldfrags != frags) {
6574				oldfrags -= frags;
6575				oldfrags = numfrags(ip->i_fs, oldfrags);
6576				blkno += numfrags(ip->i_fs, frags);
6577				newfreework(ump, freeblks, NULL, lastlbn,
6578				    blkno, oldfrags, 0, needj);
6579				if (needj)
6580					adjust_newfreework(freeblks,
6581					    numfrags(ip->i_fs, frags));
6582			} else if (blkno == 0)
6583				allocblock = 1;
6584		}
6585		/*
6586		 * Add a journal record for partial truncate if we are
6587		 * handling indirect blocks.  Non-indirects need no extra
6588		 * journaling.
6589		 */
6590		if (length != 0 && lastlbn >= NDADDR) {
6591			ip->i_flag |= IN_TRUNCATED;
6592			newjtrunc(freeblks, length, 0);
6593		}
6594		ip->i_size = length;
6595		DIP_SET(ip, i_size, ip->i_size);
6596		datablocks = DIP(ip, i_blocks) - extblocks;
6597		if (length != 0)
6598			datablocks = blkcount(ip->i_fs, datablocks, length);
6599		freeblks->fb_len = length;
6600	}
6601	if ((flags & IO_EXT) != 0) {
6602		for (i = 0; i < NXADDR; i++)
6603			setup_freeext(freeblks, ip, i, needj);
6604		ip->i_din2->di_extsize = 0;
6605		datablocks += extblocks;
6606	}
6607#ifdef QUOTA
6608	/* Reference the quotas in case the block count is wrong in the end. */
6609	quotaref(vp, freeblks->fb_quota);
6610	(void) chkdq(ip, -datablocks, NOCRED, 0);
6611#endif
6612	freeblks->fb_chkcnt = -datablocks;
6613	UFS_LOCK(ump);
6614	fs->fs_pendingblocks += datablocks;
6615	UFS_UNLOCK(ump);
6616	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6617	/*
6618	 * Handle truncation of incomplete alloc direct dependencies.  We
6619	 * hold the inode block locked to prevent incomplete dependencies
6620	 * from reaching the disk while we are eliminating those that
6621	 * have been truncated.  This is a partially inlined ffs_update().
6622	 */
6623	ufs_itimes(vp);
6624	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6625	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6626	    (int)fs->fs_bsize, cred, &bp);
6627	if (error) {
6628		brelse(bp);
6629		softdep_error("softdep_journal_freeblocks", error);
6630		return;
6631	}
6632	if (bp->b_bufsize == fs->fs_bsize)
6633		bp->b_flags |= B_CLUSTEROK;
6634	softdep_update_inodeblock(ip, bp, 0);
6635	if (ump->um_fstype == UFS1)
6636		*((struct ufs1_dinode *)bp->b_data +
6637		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6638	else
6639		*((struct ufs2_dinode *)bp->b_data +
6640		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6641	ACQUIRE_LOCK(ump);
6642	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6643	if ((inodedep->id_state & IOSTARTED) != 0)
6644		panic("softdep_setup_freeblocks: inode busy");
6645	/*
6646	 * Add the freeblks structure to the list of operations that
6647	 * must await the zero'ed inode being written to disk. If we
6648	 * still have a bitmap dependency (needj), then the inode
6649	 * has never been written to disk, so we can process the
6650	 * freeblks below once we have deleted the dependencies.
6651	 */
6652	if (needj)
6653		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6654	else
6655		freeblks->fb_state |= COMPLETE;
6656	if ((flags & IO_NORMAL) != 0) {
6657		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6658			if (adp->ad_offset > iboff)
6659				cancel_allocdirect(&inodedep->id_inoupdt, adp,
6660				    freeblks);
6661			/*
6662			 * Truncate the allocdirect.  We could eliminate
6663			 * or modify journal records as well.
6664			 */
6665			else if (adp->ad_offset == iboff && frags)
6666				adp->ad_newsize = frags;
6667		}
6668	}
6669	if ((flags & IO_EXT) != 0)
6670		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
6671			cancel_allocdirect(&inodedep->id_extupdt, adp,
6672			    freeblks);
6673	/*
6674	 * Scan the bufwait list for newblock dependencies that will never
6675	 * make it to disk.
6676	 */
6677	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6678		if (wk->wk_type != D_ALLOCDIRECT)
6679			continue;
6680		adp = WK_ALLOCDIRECT(wk);
6681		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6682		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6683			cancel_jfreeblk(freeblks, adp->ad_newblkno);
6684			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6685			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6686		}
6687	}
6688	/*
6689	 * Add journal work.
6690	 */
6691	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6692		add_to_journal(&jblkdep->jb_list);
6693	FREE_LOCK(ump);
6694	bdwrite(bp);
6695	/*
6696	 * Truncate dependency structures beyond length.
6697	 */
6698	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6699	/*
6700	 * This is only set when we need to allocate a fragment because
6701	 * none existed at the end of a frag-sized file.  It handles only
6702	 * allocating a new, zero filled block.
6703	 */
6704	if (allocblock) {
6705		ip->i_size = length - lastoff;
6706		DIP_SET(ip, i_size, ip->i_size);
6707		error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6708		if (error != 0) {
6709			softdep_error("softdep_journal_freeblks", error);
6710			return;
6711		}
6712		ip->i_size = length;
6713		DIP_SET(ip, i_size, length);
6714		ip->i_flag |= IN_CHANGE | IN_UPDATE;
6715		allocbuf(bp, frags);
6716		ffs_update(vp, 0);
6717		bawrite(bp);
6718	} else if (lastoff != 0 && vp->v_type != VDIR) {
6719		int size;
6720
6721		/*
6722		 * Zero the end of a truncated frag or block.
6723		 */
6724		size = sblksize(fs, length, lastlbn);
6725		error = bread(vp, lastlbn, size, cred, &bp);
6726		if (error) {
6727			softdep_error("softdep_journal_freeblks", error);
6728			return;
6729		}
6730		bzero((char *)bp->b_data + lastoff, size - lastoff);
6731		bawrite(bp);
6732
6733	}
6734	ACQUIRE_LOCK(ump);
6735	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6736	TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
6737	freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
6738	/*
6739	 * We zero earlier truncations so they don't erroneously
6740	 * update i_blocks.
6741	 */
6742	if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
6743		TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
6744			fbn->fb_len = 0;
6745	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
6746	    LIST_EMPTY(&freeblks->fb_jblkdephd))
6747		freeblks->fb_state |= INPROGRESS;
6748	else
6749		freeblks = NULL;
6750	FREE_LOCK(ump);
6751	if (freeblks)
6752		handle_workitem_freeblocks(freeblks, 0);
6753	trunc_pages(ip, length, extblocks, flags);
6754
6755}
6756
6757/*
6758 * Flush a JOP_SYNC to the journal.
6759 */
6760void
6761softdep_journal_fsync(ip)
6762	struct inode *ip;
6763{
6764	struct jfsync *jfsync;
6765
6766	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
6767	    ("softdep_journal_fsync called on non-softdep filesystem"));
6768	if ((ip->i_flag & IN_TRUNCATED) == 0)
6769		return;
6770	ip->i_flag &= ~IN_TRUNCATED;
6771	jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
6772	workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump));
6773	jfsync->jfs_size = ip->i_size;
6774	jfsync->jfs_ino = ip->i_number;
6775	ACQUIRE_LOCK(ip->i_ump);
6776	add_to_journal(&jfsync->jfs_list);
6777	jwait(&jfsync->jfs_list, MNT_WAIT);
6778	FREE_LOCK(ip->i_ump);
6779}
6780
6781/*
6782 * Block de-allocation dependencies.
6783 *
6784 * When blocks are de-allocated, the on-disk pointers must be nullified before
6785 * the blocks are made available for use by other files.  (The true
6786 * requirement is that old pointers must be nullified before new on-disk
6787 * pointers are set.  We chose this slightly more stringent requirement to
6788 * reduce complexity.) Our implementation handles this dependency by updating
6789 * the inode (or indirect block) appropriately but delaying the actual block
6790 * de-allocation (i.e., freemap and free space count manipulation) until
6791 * after the updated versions reach stable storage.  After the disk is
6792 * updated, the blocks can be safely de-allocated whenever it is convenient.
6793 * This implementation handles only the common case of reducing a file's
6794 * length to zero. Other cases are handled by the conventional synchronous
6795 * write approach.
6796 *
6797 * The ffs implementation with which we worked double-checks
6798 * the state of the block pointers and file size as it reduces
6799 * a file's length.  Some of this code is replicated here in our
6800 * soft updates implementation.  The freeblks->fb_chkcnt field is
6801 * used to transfer a part of this information to the procedure
6802 * that eventually de-allocates the blocks.
6803 *
6804 * This routine should be called from the routine that shortens
6805 * a file's length, before the inode's size or block pointers
6806 * are modified. It will save the block pointer information for
6807 * later release and zero the inode so that the calling routine
6808 * can release it.
6809 */
6810void
6811softdep_setup_freeblocks(ip, length, flags)
6812	struct inode *ip;	/* The inode whose length is to be reduced */
6813	off_t length;		/* The new length for the file */
6814	int flags;		/* IO_EXT and/or IO_NORMAL */
6815{
6816	struct ufs1_dinode *dp1;
6817	struct ufs2_dinode *dp2;
6818	struct freeblks *freeblks;
6819	struct inodedep *inodedep;
6820	struct allocdirect *adp;
6821	struct ufsmount *ump;
6822	struct buf *bp;
6823	struct fs *fs;
6824	ufs2_daddr_t extblocks, datablocks;
6825	struct mount *mp;
6826	int i, delay, error;
6827	ufs_lbn_t tmpval;
6828	ufs_lbn_t lbn;
6829
6830	ump = ip->i_ump;
6831	mp = UFSTOVFS(ump);
6832	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6833	    ("softdep_setup_freeblocks called on non-softdep filesystem"));
6834	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
6835	    ip->i_number, length);
6836	KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
6837	fs = ip->i_fs;
6838	freeblks = newfreeblks(mp, ip);
6839	extblocks = 0;
6840	datablocks = 0;
6841	if (fs->fs_magic == FS_UFS2_MAGIC)
6842		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6843	if ((flags & IO_NORMAL) != 0) {
6844		for (i = 0; i < NDADDR; i++)
6845			setup_freedirect(freeblks, ip, i, 0);
6846		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6847		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
6848			setup_freeindir(freeblks, ip, i, -lbn -i, 0);
6849		ip->i_size = 0;
6850		DIP_SET(ip, i_size, 0);
6851		datablocks = DIP(ip, i_blocks) - extblocks;
6852	}
6853	if ((flags & IO_EXT) != 0) {
6854		for (i = 0; i < NXADDR; i++)
6855			setup_freeext(freeblks, ip, i, 0);
6856		ip->i_din2->di_extsize = 0;
6857		datablocks += extblocks;
6858	}
6859#ifdef QUOTA
6860	/* Reference the quotas in case the block count is wrong in the end. */
6861	quotaref(ITOV(ip), freeblks->fb_quota);
6862	(void) chkdq(ip, -datablocks, NOCRED, 0);
6863#endif
6864	freeblks->fb_chkcnt = -datablocks;
6865	UFS_LOCK(ump);
6866	fs->fs_pendingblocks += datablocks;
6867	UFS_UNLOCK(ump);
6868	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6869	/*
6870	 * Push the zero'ed inode to to its disk buffer so that we are free
6871	 * to delete its dependencies below. Once the dependencies are gone
6872	 * the buffer can be safely released.
6873	 */
6874	if ((error = bread(ip->i_devvp,
6875	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6876	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
6877		brelse(bp);
6878		softdep_error("softdep_setup_freeblocks", error);
6879	}
6880	if (ump->um_fstype == UFS1) {
6881		dp1 = ((struct ufs1_dinode *)bp->b_data +
6882		    ino_to_fsbo(fs, ip->i_number));
6883		ip->i_din1->di_freelink = dp1->di_freelink;
6884		*dp1 = *ip->i_din1;
6885	} else {
6886		dp2 = ((struct ufs2_dinode *)bp->b_data +
6887		    ino_to_fsbo(fs, ip->i_number));
6888		ip->i_din2->di_freelink = dp2->di_freelink;
6889		*dp2 = *ip->i_din2;
6890	}
6891	/*
6892	 * Find and eliminate any inode dependencies.
6893	 */
6894	ACQUIRE_LOCK(ump);
6895	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6896	if ((inodedep->id_state & IOSTARTED) != 0)
6897		panic("softdep_setup_freeblocks: inode busy");
6898	/*
6899	 * Add the freeblks structure to the list of operations that
6900	 * must await the zero'ed inode being written to disk. If we
6901	 * still have a bitmap dependency (delay == 0), then the inode
6902	 * has never been written to disk, so we can process the
6903	 * freeblks below once we have deleted the dependencies.
6904	 */
6905	delay = (inodedep->id_state & DEPCOMPLETE);
6906	if (delay)
6907		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6908	else
6909		freeblks->fb_state |= COMPLETE;
6910	/*
6911	 * Because the file length has been truncated to zero, any
6912	 * pending block allocation dependency structures associated
6913	 * with this inode are obsolete and can simply be de-allocated.
6914	 * We must first merge the two dependency lists to get rid of
6915	 * any duplicate freefrag structures, then purge the merged list.
6916	 * If we still have a bitmap dependency, then the inode has never
6917	 * been written to disk, so we can free any fragments without delay.
6918	 */
6919	if (flags & IO_NORMAL) {
6920		merge_inode_lists(&inodedep->id_newinoupdt,
6921		    &inodedep->id_inoupdt);
6922		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
6923			cancel_allocdirect(&inodedep->id_inoupdt, adp,
6924			    freeblks);
6925	}
6926	if (flags & IO_EXT) {
6927		merge_inode_lists(&inodedep->id_newextupdt,
6928		    &inodedep->id_extupdt);
6929		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
6930			cancel_allocdirect(&inodedep->id_extupdt, adp,
6931			    freeblks);
6932	}
6933	FREE_LOCK(ump);
6934	bdwrite(bp);
6935	trunc_dependencies(ip, freeblks, -1, 0, flags);
6936	ACQUIRE_LOCK(ump);
6937	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
6938		(void) free_inodedep(inodedep);
6939	freeblks->fb_state |= DEPCOMPLETE;
6940	/*
6941	 * If the inode with zeroed block pointers is now on disk
6942	 * we can start freeing blocks.
6943	 */
6944	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
6945		freeblks->fb_state |= INPROGRESS;
6946	else
6947		freeblks = NULL;
6948	FREE_LOCK(ump);
6949	if (freeblks)
6950		handle_workitem_freeblocks(freeblks, 0);
6951	trunc_pages(ip, length, extblocks, flags);
6952}
6953
6954/*
6955 * Eliminate pages from the page cache that back parts of this inode and
6956 * adjust the vnode pager's idea of our size.  This prevents stale data
6957 * from hanging around in the page cache.
6958 */
6959static void
6960trunc_pages(ip, length, extblocks, flags)
6961	struct inode *ip;
6962	off_t length;
6963	ufs2_daddr_t extblocks;
6964	int flags;
6965{
6966	struct vnode *vp;
6967	struct fs *fs;
6968	ufs_lbn_t lbn;
6969	off_t end, extend;
6970
6971	vp = ITOV(ip);
6972	fs = ip->i_fs;
6973	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
6974	if ((flags & IO_EXT) != 0)
6975		vn_pages_remove(vp, extend, 0);
6976	if ((flags & IO_NORMAL) == 0)
6977		return;
6978	BO_LOCK(&vp->v_bufobj);
6979	drain_output(vp);
6980	BO_UNLOCK(&vp->v_bufobj);
6981	/*
6982	 * The vnode pager eliminates file pages we eliminate indirects
6983	 * below.
6984	 */
6985	vnode_pager_setsize(vp, length);
6986	/*
6987	 * Calculate the end based on the last indirect we want to keep.  If
6988	 * the block extends into indirects we can just use the negative of
6989	 * its lbn.  Doubles and triples exist at lower numbers so we must
6990	 * be careful not to remove those, if they exist.  double and triple
6991	 * indirect lbns do not overlap with others so it is not important
6992	 * to verify how many levels are required.
6993	 */
6994	lbn = lblkno(fs, length);
6995	if (lbn >= NDADDR) {
6996		/* Calculate the virtual lbn of the triple indirect. */
6997		lbn = -lbn - (NIADDR - 1);
6998		end = OFF_TO_IDX(lblktosize(fs, lbn));
6999	} else
7000		end = extend;
7001	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
7002}
7003
7004/*
7005 * See if the buf bp is in the range eliminated by truncation.
7006 */
7007static int
7008trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
7009	struct buf *bp;
7010	int *blkoffp;
7011	ufs_lbn_t lastlbn;
7012	int lastoff;
7013	int flags;
7014{
7015	ufs_lbn_t lbn;
7016
7017	*blkoffp = 0;
7018	/* Only match ext/normal blocks as appropriate. */
7019	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
7020	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
7021		return (0);
7022	/* ALTDATA is always a full truncation. */
7023	if ((bp->b_xflags & BX_ALTDATA) != 0)
7024		return (1);
7025	/* -1 is full truncation. */
7026	if (lastlbn == -1)
7027		return (1);
7028	/*
7029	 * If this is a partial truncate we only want those
7030	 * blocks and indirect blocks that cover the range
7031	 * we're after.
7032	 */
7033	lbn = bp->b_lblkno;
7034	if (lbn < 0)
7035		lbn = -(lbn + lbn_level(lbn));
7036	if (lbn < lastlbn)
7037		return (0);
7038	/* Here we only truncate lblkno if it's partial. */
7039	if (lbn == lastlbn) {
7040		if (lastoff == 0)
7041			return (0);
7042		*blkoffp = lastoff;
7043	}
7044	return (1);
7045}
7046
7047/*
7048 * Eliminate any dependencies that exist in memory beyond lblkno:off
7049 */
7050static void
7051trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
7052	struct inode *ip;
7053	struct freeblks *freeblks;
7054	ufs_lbn_t lastlbn;
7055	int lastoff;
7056	int flags;
7057{
7058	struct bufobj *bo;
7059	struct vnode *vp;
7060	struct buf *bp;
7061	int blkoff;
7062
7063	/*
7064	 * We must wait for any I/O in progress to finish so that
7065	 * all potential buffers on the dirty list will be visible.
7066	 * Once they are all there, walk the list and get rid of
7067	 * any dependencies.
7068	 */
7069	vp = ITOV(ip);
7070	bo = &vp->v_bufobj;
7071	BO_LOCK(bo);
7072	drain_output(vp);
7073	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
7074		bp->b_vflags &= ~BV_SCANNED;
7075restart:
7076	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
7077		if (bp->b_vflags & BV_SCANNED)
7078			continue;
7079		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7080			bp->b_vflags |= BV_SCANNED;
7081			continue;
7082		}
7083		KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer"));
7084		if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
7085			goto restart;
7086		BO_UNLOCK(bo);
7087		if (deallocate_dependencies(bp, freeblks, blkoff))
7088			bqrelse(bp);
7089		else
7090			brelse(bp);
7091		BO_LOCK(bo);
7092		goto restart;
7093	}
7094	/*
7095	 * Now do the work of vtruncbuf while also matching indirect blocks.
7096	 */
7097	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
7098		bp->b_vflags &= ~BV_SCANNED;
7099cleanrestart:
7100	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
7101		if (bp->b_vflags & BV_SCANNED)
7102			continue;
7103		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7104			bp->b_vflags |= BV_SCANNED;
7105			continue;
7106		}
7107		if (BUF_LOCK(bp,
7108		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
7109		    BO_LOCKPTR(bo)) == ENOLCK) {
7110			BO_LOCK(bo);
7111			goto cleanrestart;
7112		}
7113		bp->b_vflags |= BV_SCANNED;
7114		bremfree(bp);
7115		if (blkoff != 0) {
7116			allocbuf(bp, blkoff);
7117			bqrelse(bp);
7118		} else {
7119			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
7120			brelse(bp);
7121		}
7122		BO_LOCK(bo);
7123		goto cleanrestart;
7124	}
7125	drain_output(vp);
7126	BO_UNLOCK(bo);
7127}
7128
7129static int
7130cancel_pagedep(pagedep, freeblks, blkoff)
7131	struct pagedep *pagedep;
7132	struct freeblks *freeblks;
7133	int blkoff;
7134{
7135	struct jremref *jremref;
7136	struct jmvref *jmvref;
7137	struct dirrem *dirrem, *tmp;
7138	int i;
7139
7140	/*
7141	 * Copy any directory remove dependencies to the list
7142	 * to be processed after the freeblks proceeds.  If
7143	 * directory entry never made it to disk they
7144	 * can be dumped directly onto the work list.
7145	 */
7146	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
7147		/* Skip this directory removal if it is intended to remain. */
7148		if (dirrem->dm_offset < blkoff)
7149			continue;
7150		/*
7151		 * If there are any dirrems we wait for the journal write
7152		 * to complete and then restart the buf scan as the lock
7153		 * has been dropped.
7154		 */
7155		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
7156			jwait(&jremref->jr_list, MNT_WAIT);
7157			return (ERESTART);
7158		}
7159		LIST_REMOVE(dirrem, dm_next);
7160		dirrem->dm_dirinum = pagedep->pd_ino;
7161		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
7162	}
7163	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
7164		jwait(&jmvref->jm_list, MNT_WAIT);
7165		return (ERESTART);
7166	}
7167	/*
7168	 * When we're partially truncating a pagedep we just want to flush
7169	 * journal entries and return.  There can not be any adds in the
7170	 * truncated portion of the directory and newblk must remain if
7171	 * part of the block remains.
7172	 */
7173	if (blkoff != 0) {
7174		struct diradd *dap;
7175
7176		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7177			if (dap->da_offset > blkoff)
7178				panic("cancel_pagedep: diradd %p off %d > %d",
7179				    dap, dap->da_offset, blkoff);
7180		for (i = 0; i < DAHASHSZ; i++)
7181			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7182				if (dap->da_offset > blkoff)
7183					panic("cancel_pagedep: diradd %p off %d > %d",
7184					    dap, dap->da_offset, blkoff);
7185		return (0);
7186	}
7187	/*
7188	 * There should be no directory add dependencies present
7189	 * as the directory could not be truncated until all
7190	 * children were removed.
7191	 */
7192	KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
7193	    ("deallocate_dependencies: pendinghd != NULL"));
7194	for (i = 0; i < DAHASHSZ; i++)
7195		KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7196		    ("deallocate_dependencies: diraddhd != NULL"));
7197	if ((pagedep->pd_state & NEWBLOCK) != 0)
7198		free_newdirblk(pagedep->pd_newdirblk);
7199	if (free_pagedep(pagedep) == 0)
7200		panic("Failed to free pagedep %p", pagedep);
7201	return (0);
7202}
7203
7204/*
7205 * Reclaim any dependency structures from a buffer that is about to
7206 * be reallocated to a new vnode. The buffer must be locked, thus,
7207 * no I/O completion operations can occur while we are manipulating
7208 * its associated dependencies. The mutex is held so that other I/O's
7209 * associated with related dependencies do not occur.
7210 */
7211static int
7212deallocate_dependencies(bp, freeblks, off)
7213	struct buf *bp;
7214	struct freeblks *freeblks;
7215	int off;
7216{
7217	struct indirdep *indirdep;
7218	struct pagedep *pagedep;
7219	struct worklist *wk, *wkn;
7220	struct ufsmount *ump;
7221
7222	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
7223		goto done;
7224	ump = VFSTOUFS(wk->wk_mp);
7225	ACQUIRE_LOCK(ump);
7226	LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7227		switch (wk->wk_type) {
7228		case D_INDIRDEP:
7229			indirdep = WK_INDIRDEP(wk);
7230			if (bp->b_lblkno >= 0 ||
7231			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7232				panic("deallocate_dependencies: not indir");
7233			cancel_indirdep(indirdep, bp, freeblks);
7234			continue;
7235
7236		case D_PAGEDEP:
7237			pagedep = WK_PAGEDEP(wk);
7238			if (cancel_pagedep(pagedep, freeblks, off)) {
7239				FREE_LOCK(ump);
7240				return (ERESTART);
7241			}
7242			continue;
7243
7244		case D_ALLOCINDIR:
7245			/*
7246			 * Simply remove the allocindir, we'll find it via
7247			 * the indirdep where we can clear pointers if
7248			 * needed.
7249			 */
7250			WORKLIST_REMOVE(wk);
7251			continue;
7252
7253		case D_FREEWORK:
7254			/*
7255			 * A truncation is waiting for the zero'd pointers
7256			 * to be written.  It can be freed when the freeblks
7257			 * is journaled.
7258			 */
7259			WORKLIST_REMOVE(wk);
7260			wk->wk_state |= ONDEPLIST;
7261			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7262			break;
7263
7264		case D_ALLOCDIRECT:
7265			if (off != 0)
7266				continue;
7267			/* FALLTHROUGH */
7268		default:
7269			panic("deallocate_dependencies: Unexpected type %s",
7270			    TYPENAME(wk->wk_type));
7271			/* NOTREACHED */
7272		}
7273	}
7274	FREE_LOCK(ump);
7275done:
7276	/*
7277	 * Don't throw away this buf, we were partially truncating and
7278	 * some deps may always remain.
7279	 */
7280	if (off) {
7281		allocbuf(bp, off);
7282		bp->b_vflags |= BV_SCANNED;
7283		return (EBUSY);
7284	}
7285	bp->b_flags |= B_INVAL | B_NOCACHE;
7286
7287	return (0);
7288}
7289
7290/*
7291 * An allocdirect is being canceled due to a truncate.  We must make sure
7292 * the journal entry is released in concert with the blkfree that releases
7293 * the storage.  Completed journal entries must not be released until the
7294 * space is no longer pointed to by the inode or in the bitmap.
7295 */
7296static void
7297cancel_allocdirect(adphead, adp, freeblks)
7298	struct allocdirectlst *adphead;
7299	struct allocdirect *adp;
7300	struct freeblks *freeblks;
7301{
7302	struct freework *freework;
7303	struct newblk *newblk;
7304	struct worklist *wk;
7305
7306	TAILQ_REMOVE(adphead, adp, ad_next);
7307	newblk = (struct newblk *)adp;
7308	freework = NULL;
7309	/*
7310	 * Find the correct freework structure.
7311	 */
7312	LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7313		if (wk->wk_type != D_FREEWORK)
7314			continue;
7315		freework = WK_FREEWORK(wk);
7316		if (freework->fw_blkno == newblk->nb_newblkno)
7317			break;
7318	}
7319	if (freework == NULL)
7320		panic("cancel_allocdirect: Freework not found");
7321	/*
7322	 * If a newblk exists at all we still have the journal entry that
7323	 * initiated the allocation so we do not need to journal the free.
7324	 */
7325	cancel_jfreeblk(freeblks, freework->fw_blkno);
7326	/*
7327	 * If the journal hasn't been written the jnewblk must be passed
7328	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
7329	 * this by linking the journal dependency into the freework to be
7330	 * freed when freework_freeblock() is called.  If the journal has
7331	 * been written we can simply reclaim the journal space when the
7332	 * freeblks work is complete.
7333	 */
7334	freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7335	    &freeblks->fb_jwork);
7336	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7337}
7338
7339
7340/*
7341 * Cancel a new block allocation.  May be an indirect or direct block.  We
7342 * remove it from various lists and return any journal record that needs to
7343 * be resolved by the caller.
7344 *
7345 * A special consideration is made for indirects which were never pointed
7346 * at on disk and will never be found once this block is released.
7347 */
7348static struct jnewblk *
7349cancel_newblk(newblk, wk, wkhd)
7350	struct newblk *newblk;
7351	struct worklist *wk;
7352	struct workhead *wkhd;
7353{
7354	struct jnewblk *jnewblk;
7355
7356	CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7357
7358	newblk->nb_state |= GOINGAWAY;
7359	/*
7360	 * Previously we traversed the completedhd on each indirdep
7361	 * attached to this newblk to cancel them and gather journal
7362	 * work.  Since we need only the oldest journal segment and
7363	 * the lowest point on the tree will always have the oldest
7364	 * journal segment we are free to release the segments
7365	 * of any subordinates and may leave the indirdep list to
7366	 * indirdep_complete() when this newblk is freed.
7367	 */
7368	if (newblk->nb_state & ONDEPLIST) {
7369		newblk->nb_state &= ~ONDEPLIST;
7370		LIST_REMOVE(newblk, nb_deps);
7371	}
7372	if (newblk->nb_state & ONWORKLIST)
7373		WORKLIST_REMOVE(&newblk->nb_list);
7374	/*
7375	 * If the journal entry hasn't been written we save a pointer to
7376	 * the dependency that frees it until it is written or the
7377	 * superseding operation completes.
7378	 */
7379	jnewblk = newblk->nb_jnewblk;
7380	if (jnewblk != NULL && wk != NULL) {
7381		newblk->nb_jnewblk = NULL;
7382		jnewblk->jn_dep = wk;
7383	}
7384	if (!LIST_EMPTY(&newblk->nb_jwork))
7385		jwork_move(wkhd, &newblk->nb_jwork);
7386	/*
7387	 * When truncating we must free the newdirblk early to remove
7388	 * the pagedep from the hash before returning.
7389	 */
7390	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7391		free_newdirblk(WK_NEWDIRBLK(wk));
7392	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7393		panic("cancel_newblk: extra newdirblk");
7394
7395	return (jnewblk);
7396}
7397
7398/*
7399 * Schedule the freefrag associated with a newblk to be released once
7400 * the pointers are written and the previous block is no longer needed.
7401 */
7402static void
7403newblk_freefrag(newblk)
7404	struct newblk *newblk;
7405{
7406	struct freefrag *freefrag;
7407
7408	if (newblk->nb_freefrag == NULL)
7409		return;
7410	freefrag = newblk->nb_freefrag;
7411	newblk->nb_freefrag = NULL;
7412	freefrag->ff_state |= COMPLETE;
7413	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7414		add_to_worklist(&freefrag->ff_list, 0);
7415}
7416
7417/*
7418 * Free a newblk. Generate a new freefrag work request if appropriate.
7419 * This must be called after the inode pointer and any direct block pointers
7420 * are valid or fully removed via truncate or frag extension.
7421 */
7422static void
7423free_newblk(newblk)
7424	struct newblk *newblk;
7425{
7426	struct indirdep *indirdep;
7427	struct worklist *wk;
7428
7429	KASSERT(newblk->nb_jnewblk == NULL,
7430	    ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7431	KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7432	    ("free_newblk: unclaimed newblk"));
7433	LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp));
7434	newblk_freefrag(newblk);
7435	if (newblk->nb_state & ONDEPLIST)
7436		LIST_REMOVE(newblk, nb_deps);
7437	if (newblk->nb_state & ONWORKLIST)
7438		WORKLIST_REMOVE(&newblk->nb_list);
7439	LIST_REMOVE(newblk, nb_hash);
7440	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7441		free_newdirblk(WK_NEWDIRBLK(wk));
7442	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7443		panic("free_newblk: extra newdirblk");
7444	while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7445		indirdep_complete(indirdep);
7446	handle_jwork(&newblk->nb_jwork);
7447	WORKITEM_FREE(newblk, D_NEWBLK);
7448}
7449
7450/*
7451 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7452 * This routine must be called with splbio interrupts blocked.
7453 */
7454static void
7455free_newdirblk(newdirblk)
7456	struct newdirblk *newdirblk;
7457{
7458	struct pagedep *pagedep;
7459	struct diradd *dap;
7460	struct worklist *wk;
7461
7462	LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp));
7463	WORKLIST_REMOVE(&newdirblk->db_list);
7464	/*
7465	 * If the pagedep is still linked onto the directory buffer
7466	 * dependency chain, then some of the entries on the
7467	 * pd_pendinghd list may not be committed to disk yet. In
7468	 * this case, we will simply clear the NEWBLOCK flag and
7469	 * let the pd_pendinghd list be processed when the pagedep
7470	 * is next written. If the pagedep is no longer on the buffer
7471	 * dependency chain, then all the entries on the pd_pending
7472	 * list are committed to disk and we can free them here.
7473	 */
7474	pagedep = newdirblk->db_pagedep;
7475	pagedep->pd_state &= ~NEWBLOCK;
7476	if ((pagedep->pd_state & ONWORKLIST) == 0) {
7477		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7478			free_diradd(dap, NULL);
7479		/*
7480		 * If no dependencies remain, the pagedep will be freed.
7481		 */
7482		free_pagedep(pagedep);
7483	}
7484	/* Should only ever be one item in the list. */
7485	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7486		WORKLIST_REMOVE(wk);
7487		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7488	}
7489	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7490}
7491
7492/*
7493 * Prepare an inode to be freed. The actual free operation is not
7494 * done until the zero'ed inode has been written to disk.
7495 */
7496void
7497softdep_freefile(pvp, ino, mode)
7498	struct vnode *pvp;
7499	ino_t ino;
7500	int mode;
7501{
7502	struct inode *ip = VTOI(pvp);
7503	struct inodedep *inodedep;
7504	struct freefile *freefile;
7505	struct freeblks *freeblks;
7506	struct ufsmount *ump;
7507
7508	ump = ip->i_ump;
7509	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7510	    ("softdep_freefile called on non-softdep filesystem"));
7511	/*
7512	 * This sets up the inode de-allocation dependency.
7513	 */
7514	freefile = malloc(sizeof(struct freefile),
7515		M_FREEFILE, M_SOFTDEP_FLAGS);
7516	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7517	freefile->fx_mode = mode;
7518	freefile->fx_oldinum = ino;
7519	freefile->fx_devvp = ip->i_devvp;
7520	LIST_INIT(&freefile->fx_jwork);
7521	UFS_LOCK(ump);
7522	ip->i_fs->fs_pendinginodes += 1;
7523	UFS_UNLOCK(ump);
7524
7525	/*
7526	 * If the inodedep does not exist, then the zero'ed inode has
7527	 * been written to disk. If the allocated inode has never been
7528	 * written to disk, then the on-disk inode is zero'ed. In either
7529	 * case we can free the file immediately.  If the journal was
7530	 * canceled before being written the inode will never make it to
7531	 * disk and we must send the canceled journal entrys to
7532	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7533	 * Any blocks waiting on the inode to write can be safely freed
7534	 * here as it will never been written.
7535	 */
7536	ACQUIRE_LOCK(ump);
7537	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7538	if (inodedep) {
7539		/*
7540		 * Clear out freeblks that no longer need to reference
7541		 * this inode.
7542		 */
7543		while ((freeblks =
7544		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7545			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7546			    fb_next);
7547			freeblks->fb_state &= ~ONDEPLIST;
7548		}
7549		/*
7550		 * Remove this inode from the unlinked list.
7551		 */
7552		if (inodedep->id_state & UNLINKED) {
7553			/*
7554			 * Save the journal work to be freed with the bitmap
7555			 * before we clear UNLINKED.  Otherwise it can be lost
7556			 * if the inode block is written.
7557			 */
7558			handle_bufwait(inodedep, &freefile->fx_jwork);
7559			clear_unlinked_inodedep(inodedep);
7560			/*
7561			 * Re-acquire inodedep as we've dropped the
7562			 * per-filesystem lock in clear_unlinked_inodedep().
7563			 */
7564			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7565		}
7566	}
7567	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7568		FREE_LOCK(ump);
7569		handle_workitem_freefile(freefile);
7570		return;
7571	}
7572	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7573		inodedep->id_state |= GOINGAWAY;
7574	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7575	FREE_LOCK(ump);
7576	if (ip->i_number == ino)
7577		ip->i_flag |= IN_MODIFIED;
7578}
7579
7580/*
7581 * Check to see if an inode has never been written to disk. If
7582 * so free the inodedep and return success, otherwise return failure.
7583 * This routine must be called with splbio interrupts blocked.
7584 *
7585 * If we still have a bitmap dependency, then the inode has never
7586 * been written to disk. Drop the dependency as it is no longer
7587 * necessary since the inode is being deallocated. We set the
7588 * ALLCOMPLETE flags since the bitmap now properly shows that the
7589 * inode is not allocated. Even if the inode is actively being
7590 * written, it has been rolled back to its zero'ed state, so we
7591 * are ensured that a zero inode is what is on the disk. For short
7592 * lived files, this change will usually result in removing all the
7593 * dependencies from the inode so that it can be freed immediately.
7594 */
7595static int
7596check_inode_unwritten(inodedep)
7597	struct inodedep *inodedep;
7598{
7599
7600	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7601
7602	if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7603	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7604	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7605	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7606	    !LIST_EMPTY(&inodedep->id_inowait) ||
7607	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7608	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7609	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7610	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7611	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7612	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7613	    inodedep->id_mkdiradd != NULL ||
7614	    inodedep->id_nlinkdelta != 0)
7615		return (0);
7616	/*
7617	 * Another process might be in initiate_write_inodeblock_ufs[12]
7618	 * trying to allocate memory without holding "Softdep Lock".
7619	 */
7620	if ((inodedep->id_state & IOSTARTED) != 0 &&
7621	    inodedep->id_savedino1 == NULL)
7622		return (0);
7623
7624	if (inodedep->id_state & ONDEPLIST)
7625		LIST_REMOVE(inodedep, id_deps);
7626	inodedep->id_state &= ~ONDEPLIST;
7627	inodedep->id_state |= ALLCOMPLETE;
7628	inodedep->id_bmsafemap = NULL;
7629	if (inodedep->id_state & ONWORKLIST)
7630		WORKLIST_REMOVE(&inodedep->id_list);
7631	if (inodedep->id_savedino1 != NULL) {
7632		free(inodedep->id_savedino1, M_SAVEDINO);
7633		inodedep->id_savedino1 = NULL;
7634	}
7635	if (free_inodedep(inodedep) == 0)
7636		panic("check_inode_unwritten: busy inode");
7637	return (1);
7638}
7639
7640static int
7641check_inodedep_free(inodedep)
7642	struct inodedep *inodedep;
7643{
7644
7645	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7646	if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7647	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7648	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7649	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7650	    !LIST_EMPTY(&inodedep->id_inowait) ||
7651	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7652	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7653	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7654	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7655	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7656	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7657	    inodedep->id_mkdiradd != NULL ||
7658	    inodedep->id_nlinkdelta != 0 ||
7659	    inodedep->id_savedino1 != NULL)
7660		return (0);
7661	return (1);
7662}
7663
7664/*
7665 * Try to free an inodedep structure. Return 1 if it could be freed.
7666 */
7667static int
7668free_inodedep(inodedep)
7669	struct inodedep *inodedep;
7670{
7671
7672	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7673	if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7674	    !check_inodedep_free(inodedep))
7675		return (0);
7676	if (inodedep->id_state & ONDEPLIST)
7677		LIST_REMOVE(inodedep, id_deps);
7678	LIST_REMOVE(inodedep, id_hash);
7679	WORKITEM_FREE(inodedep, D_INODEDEP);
7680	return (1);
7681}
7682
7683/*
7684 * Free the block referenced by a freework structure.  The parent freeblks
7685 * structure is released and completed when the final cg bitmap reaches
7686 * the disk.  This routine may be freeing a jnewblk which never made it to
7687 * disk in which case we do not have to wait as the operation is undone
7688 * in memory immediately.
7689 */
7690static void
7691freework_freeblock(freework)
7692	struct freework *freework;
7693{
7694	struct freeblks *freeblks;
7695	struct jnewblk *jnewblk;
7696	struct ufsmount *ump;
7697	struct workhead wkhd;
7698	struct fs *fs;
7699	int bsize;
7700	int needj;
7701
7702	ump = VFSTOUFS(freework->fw_list.wk_mp);
7703	LOCK_OWNED(ump);
7704	/*
7705	 * Handle partial truncate separately.
7706	 */
7707	if (freework->fw_indir) {
7708		complete_trunc_indir(freework);
7709		return;
7710	}
7711	freeblks = freework->fw_freeblks;
7712	fs = ump->um_fs;
7713	needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7714	bsize = lfragtosize(fs, freework->fw_frags);
7715	LIST_INIT(&wkhd);
7716	/*
7717	 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7718	 * on the indirblk hashtable and prevents premature freeing.
7719	 */
7720	freework->fw_state |= DEPCOMPLETE;
7721	/*
7722	 * SUJ needs to wait for the segment referencing freed indirect
7723	 * blocks to expire so that we know the checker will not confuse
7724	 * a re-allocated indirect block with its old contents.
7725	 */
7726	if (needj && freework->fw_lbn <= -NDADDR)
7727		indirblk_insert(freework);
7728	/*
7729	 * If we are canceling an existing jnewblk pass it to the free
7730	 * routine, otherwise pass the freeblk which will ultimately
7731	 * release the freeblks.  If we're not journaling, we can just
7732	 * free the freeblks immediately.
7733	 */
7734	jnewblk = freework->fw_jnewblk;
7735	if (jnewblk != NULL) {
7736		cancel_jnewblk(jnewblk, &wkhd);
7737		needj = 0;
7738	} else if (needj) {
7739		freework->fw_state |= DELAYEDFREE;
7740		freeblks->fb_cgwait++;
7741		WORKLIST_INSERT(&wkhd, &freework->fw_list);
7742	}
7743	FREE_LOCK(ump);
7744	freeblks_free(ump, freeblks, btodb(bsize));
7745	CTR4(KTR_SUJ,
7746	    "freework_freeblock: ino %d blkno %jd lbn %jd size %ld",
7747	    freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
7748	ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
7749	    freeblks->fb_inum, freeblks->fb_vtype, &wkhd);
7750	ACQUIRE_LOCK(ump);
7751	/*
7752	 * The jnewblk will be discarded and the bits in the map never
7753	 * made it to disk.  We can immediately free the freeblk.
7754	 */
7755	if (needj == 0)
7756		handle_written_freework(freework);
7757}
7758
7759/*
7760 * We enqueue freework items that need processing back on the freeblks and
7761 * add the freeblks to the worklist.  This makes it easier to find all work
7762 * required to flush a truncation in process_truncates().
7763 */
7764static void
7765freework_enqueue(freework)
7766	struct freework *freework;
7767{
7768	struct freeblks *freeblks;
7769
7770	freeblks = freework->fw_freeblks;
7771	if ((freework->fw_state & INPROGRESS) == 0)
7772		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
7773	if ((freeblks->fb_state &
7774	    (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
7775	    LIST_EMPTY(&freeblks->fb_jblkdephd))
7776		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7777}
7778
7779/*
7780 * Start, continue, or finish the process of freeing an indirect block tree.
7781 * The free operation may be paused at any point with fw_off containing the
7782 * offset to restart from.  This enables us to implement some flow control
7783 * for large truncates which may fan out and generate a huge number of
7784 * dependencies.
7785 */
7786static void
7787handle_workitem_indirblk(freework)
7788	struct freework *freework;
7789{
7790	struct freeblks *freeblks;
7791	struct ufsmount *ump;
7792	struct fs *fs;
7793
7794	freeblks = freework->fw_freeblks;
7795	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7796	fs = ump->um_fs;
7797	if (freework->fw_state & DEPCOMPLETE) {
7798		handle_written_freework(freework);
7799		return;
7800	}
7801	if (freework->fw_off == NINDIR(fs)) {
7802		freework_freeblock(freework);
7803		return;
7804	}
7805	freework->fw_state |= INPROGRESS;
7806	FREE_LOCK(ump);
7807	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
7808	    freework->fw_lbn);
7809	ACQUIRE_LOCK(ump);
7810}
7811
7812/*
7813 * Called when a freework structure attached to a cg buf is written.  The
7814 * ref on either the parent or the freeblks structure is released and
7815 * the freeblks is added back to the worklist if there is more work to do.
7816 */
7817static void
7818handle_written_freework(freework)
7819	struct freework *freework;
7820{
7821	struct freeblks *freeblks;
7822	struct freework *parent;
7823
7824	freeblks = freework->fw_freeblks;
7825	parent = freework->fw_parent;
7826	if (freework->fw_state & DELAYEDFREE)
7827		freeblks->fb_cgwait--;
7828	freework->fw_state |= COMPLETE;
7829	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
7830		WORKITEM_FREE(freework, D_FREEWORK);
7831	if (parent) {
7832		if (--parent->fw_ref == 0)
7833			freework_enqueue(parent);
7834		return;
7835	}
7836	if (--freeblks->fb_ref != 0)
7837		return;
7838	if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
7839	    ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
7840		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7841}
7842
7843/*
7844 * This workitem routine performs the block de-allocation.
7845 * The workitem is added to the pending list after the updated
7846 * inode block has been written to disk.  As mentioned above,
7847 * checks regarding the number of blocks de-allocated (compared
7848 * to the number of blocks allocated for the file) are also
7849 * performed in this function.
7850 */
7851static int
7852handle_workitem_freeblocks(freeblks, flags)
7853	struct freeblks *freeblks;
7854	int flags;
7855{
7856	struct freework *freework;
7857	struct newblk *newblk;
7858	struct allocindir *aip;
7859	struct ufsmount *ump;
7860	struct worklist *wk;
7861
7862	KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
7863	    ("handle_workitem_freeblocks: Journal entries not written."));
7864	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7865	ACQUIRE_LOCK(ump);
7866	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
7867		WORKLIST_REMOVE(wk);
7868		switch (wk->wk_type) {
7869		case D_DIRREM:
7870			wk->wk_state |= COMPLETE;
7871			add_to_worklist(wk, 0);
7872			continue;
7873
7874		case D_ALLOCDIRECT:
7875			free_newblk(WK_NEWBLK(wk));
7876			continue;
7877
7878		case D_ALLOCINDIR:
7879			aip = WK_ALLOCINDIR(wk);
7880			freework = NULL;
7881			if (aip->ai_state & DELAYEDFREE) {
7882				FREE_LOCK(ump);
7883				freework = newfreework(ump, freeblks, NULL,
7884				    aip->ai_lbn, aip->ai_newblkno,
7885				    ump->um_fs->fs_frag, 0, 0);
7886				ACQUIRE_LOCK(ump);
7887			}
7888			newblk = WK_NEWBLK(wk);
7889			if (newblk->nb_jnewblk) {
7890				freework->fw_jnewblk = newblk->nb_jnewblk;
7891				newblk->nb_jnewblk->jn_dep = &freework->fw_list;
7892				newblk->nb_jnewblk = NULL;
7893			}
7894			free_newblk(newblk);
7895			continue;
7896
7897		case D_FREEWORK:
7898			freework = WK_FREEWORK(wk);
7899			if (freework->fw_lbn <= -NDADDR)
7900				handle_workitem_indirblk(freework);
7901			else
7902				freework_freeblock(freework);
7903			continue;
7904		default:
7905			panic("handle_workitem_freeblocks: Unknown type %s",
7906			    TYPENAME(wk->wk_type));
7907		}
7908	}
7909	if (freeblks->fb_ref != 0) {
7910		freeblks->fb_state &= ~INPROGRESS;
7911		wake_worklist(&freeblks->fb_list);
7912		freeblks = NULL;
7913	}
7914	FREE_LOCK(ump);
7915	if (freeblks)
7916		return handle_complete_freeblocks(freeblks, flags);
7917	return (0);
7918}
7919
7920/*
7921 * Handle completion of block free via truncate.  This allows fs_pending
7922 * to track the actual free block count more closely than if we only updated
7923 * it at the end.  We must be careful to handle cases where the block count
7924 * on free was incorrect.
7925 */
7926static void
7927freeblks_free(ump, freeblks, blocks)
7928	struct ufsmount *ump;
7929	struct freeblks *freeblks;
7930	int blocks;
7931{
7932	struct fs *fs;
7933	ufs2_daddr_t remain;
7934
7935	UFS_LOCK(ump);
7936	remain = -freeblks->fb_chkcnt;
7937	freeblks->fb_chkcnt += blocks;
7938	if (remain > 0) {
7939		if (remain < blocks)
7940			blocks = remain;
7941		fs = ump->um_fs;
7942		fs->fs_pendingblocks -= blocks;
7943	}
7944	UFS_UNLOCK(ump);
7945}
7946
7947/*
7948 * Once all of the freework workitems are complete we can retire the
7949 * freeblocks dependency and any journal work awaiting completion.  This
7950 * can not be called until all other dependencies are stable on disk.
7951 */
7952static int
7953handle_complete_freeblocks(freeblks, flags)
7954	struct freeblks *freeblks;
7955	int flags;
7956{
7957	struct inodedep *inodedep;
7958	struct inode *ip;
7959	struct vnode *vp;
7960	struct fs *fs;
7961	struct ufsmount *ump;
7962	ufs2_daddr_t spare;
7963
7964	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7965	fs = ump->um_fs;
7966	flags = LK_EXCLUSIVE | flags;
7967	spare = freeblks->fb_chkcnt;
7968
7969	/*
7970	 * If we did not release the expected number of blocks we may have
7971	 * to adjust the inode block count here.  Only do so if it wasn't
7972	 * a truncation to zero and the modrev still matches.
7973	 */
7974	if (spare && freeblks->fb_len != 0) {
7975		if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7976		    flags, &vp, FFSV_FORCEINSMQ) != 0)
7977			return (EBUSY);
7978		ip = VTOI(vp);
7979		if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
7980			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
7981			ip->i_flag |= IN_CHANGE;
7982			/*
7983			 * We must wait so this happens before the
7984			 * journal is reclaimed.
7985			 */
7986			ffs_update(vp, 1);
7987		}
7988		vput(vp);
7989	}
7990	if (spare < 0) {
7991		UFS_LOCK(ump);
7992		fs->fs_pendingblocks += spare;
7993		UFS_UNLOCK(ump);
7994	}
7995#ifdef QUOTA
7996	/* Handle spare. */
7997	if (spare)
7998		quotaadj(freeblks->fb_quota, ump, -spare);
7999	quotarele(freeblks->fb_quota);
8000#endif
8001	ACQUIRE_LOCK(ump);
8002	if (freeblks->fb_state & ONDEPLIST) {
8003		inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
8004		    0, &inodedep);
8005		TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
8006		freeblks->fb_state &= ~ONDEPLIST;
8007		if (TAILQ_EMPTY(&inodedep->id_freeblklst))
8008			free_inodedep(inodedep);
8009	}
8010	/*
8011	 * All of the freeblock deps must be complete prior to this call
8012	 * so it's now safe to complete earlier outstanding journal entries.
8013	 */
8014	handle_jwork(&freeblks->fb_jwork);
8015	WORKITEM_FREE(freeblks, D_FREEBLKS);
8016	FREE_LOCK(ump);
8017	return (0);
8018}
8019
8020/*
8021 * Release blocks associated with the freeblks and stored in the indirect
8022 * block dbn. If level is greater than SINGLE, the block is an indirect block
8023 * and recursive calls to indirtrunc must be used to cleanse other indirect
8024 * blocks.
8025 *
8026 * This handles partial and complete truncation of blocks.  Partial is noted
8027 * with goingaway == 0.  In this case the freework is completed after the
8028 * zero'd indirects are written to disk.  For full truncation the freework
8029 * is completed after the block is freed.
8030 */
8031static void
8032indir_trunc(freework, dbn, lbn)
8033	struct freework *freework;
8034	ufs2_daddr_t dbn;
8035	ufs_lbn_t lbn;
8036{
8037	struct freework *nfreework;
8038	struct workhead wkhd;
8039	struct freeblks *freeblks;
8040	struct buf *bp;
8041	struct fs *fs;
8042	struct indirdep *indirdep;
8043	struct ufsmount *ump;
8044	ufs1_daddr_t *bap1;
8045	ufs2_daddr_t nb, nnb, *bap2;
8046	ufs_lbn_t lbnadd, nlbn;
8047	int i, nblocks, ufs1fmt;
8048	int freedblocks;
8049	int goingaway;
8050	int freedeps;
8051	int needj;
8052	int level;
8053	int cnt;
8054
8055	freeblks = freework->fw_freeblks;
8056	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8057	fs = ump->um_fs;
8058	/*
8059	 * Get buffer of block pointers to be freed.  There are three cases:
8060	 *
8061	 * 1) Partial truncate caches the indirdep pointer in the freework
8062	 *    which provides us a back copy to the save bp which holds the
8063	 *    pointers we want to clear.  When this completes the zero
8064	 *    pointers are written to the real copy.
8065	 * 2) The indirect is being completely truncated, cancel_indirdep()
8066	 *    eliminated the real copy and placed the indirdep on the saved
8067	 *    copy.  The indirdep and buf are discarded when this completes.
8068	 * 3) The indirect was not in memory, we read a copy off of the disk
8069	 *    using the devvp and drop and invalidate the buffer when we're
8070	 *    done.
8071	 */
8072	goingaway = 1;
8073	indirdep = NULL;
8074	if (freework->fw_indir != NULL) {
8075		goingaway = 0;
8076		indirdep = freework->fw_indir;
8077		bp = indirdep->ir_savebp;
8078		if (bp == NULL || bp->b_blkno != dbn)
8079			panic("indir_trunc: Bad saved buf %p blkno %jd",
8080			    bp, (intmax_t)dbn);
8081	} else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
8082		/*
8083		 * The lock prevents the buf dep list from changing and
8084	 	 * indirects on devvp should only ever have one dependency.
8085		 */
8086		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
8087		if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
8088			panic("indir_trunc: Bad indirdep %p from buf %p",
8089			    indirdep, bp);
8090	} else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
8091	    NOCRED, &bp) != 0) {
8092		brelse(bp);
8093		return;
8094	}
8095	ACQUIRE_LOCK(ump);
8096	/* Protects against a race with complete_trunc_indir(). */
8097	freework->fw_state &= ~INPROGRESS;
8098	/*
8099	 * If we have an indirdep we need to enforce the truncation order
8100	 * and discard it when it is complete.
8101	 */
8102	if (indirdep) {
8103		if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
8104		    !TAILQ_EMPTY(&indirdep->ir_trunc)) {
8105			/*
8106			 * Add the complete truncate to the list on the
8107			 * indirdep to enforce in-order processing.
8108			 */
8109			if (freework->fw_indir == NULL)
8110				TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
8111				    freework, fw_next);
8112			FREE_LOCK(ump);
8113			return;
8114		}
8115		/*
8116		 * If we're goingaway, free the indirdep.  Otherwise it will
8117		 * linger until the write completes.
8118		 */
8119		if (goingaway)
8120			free_indirdep(indirdep);
8121	}
8122	FREE_LOCK(ump);
8123	/* Initialize pointers depending on block size. */
8124	if (ump->um_fstype == UFS1) {
8125		bap1 = (ufs1_daddr_t *)bp->b_data;
8126		nb = bap1[freework->fw_off];
8127		ufs1fmt = 1;
8128		bap2 = NULL;
8129	} else {
8130		bap2 = (ufs2_daddr_t *)bp->b_data;
8131		nb = bap2[freework->fw_off];
8132		ufs1fmt = 0;
8133		bap1 = NULL;
8134	}
8135	level = lbn_level(lbn);
8136	needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
8137	lbnadd = lbn_offset(fs, level);
8138	nblocks = btodb(fs->fs_bsize);
8139	nfreework = freework;
8140	freedeps = 0;
8141	cnt = 0;
8142	/*
8143	 * Reclaim blocks.  Traverses into nested indirect levels and
8144	 * arranges for the current level to be freed when subordinates
8145	 * are free when journaling.
8146	 */
8147	for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
8148		if (i != NINDIR(fs) - 1) {
8149			if (ufs1fmt)
8150				nnb = bap1[i+1];
8151			else
8152				nnb = bap2[i+1];
8153		} else
8154			nnb = 0;
8155		if (nb == 0)
8156			continue;
8157		cnt++;
8158		if (level != 0) {
8159			nlbn = (lbn + 1) - (i * lbnadd);
8160			if (needj != 0) {
8161				nfreework = newfreework(ump, freeblks, freework,
8162				    nlbn, nb, fs->fs_frag, 0, 0);
8163				freedeps++;
8164			}
8165			indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
8166		} else {
8167			struct freedep *freedep;
8168
8169			/*
8170			 * Attempt to aggregate freedep dependencies for
8171			 * all blocks being released to the same CG.
8172			 */
8173			LIST_INIT(&wkhd);
8174			if (needj != 0 &&
8175			    (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
8176				freedep = newfreedep(freework);
8177				WORKLIST_INSERT_UNLOCKED(&wkhd,
8178				    &freedep->fd_list);
8179				freedeps++;
8180			}
8181			CTR3(KTR_SUJ,
8182			    "indir_trunc: ino %d blkno %jd size %ld",
8183			    freeblks->fb_inum, nb, fs->fs_bsize);
8184			ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
8185			    fs->fs_bsize, freeblks->fb_inum,
8186			    freeblks->fb_vtype, &wkhd);
8187		}
8188	}
8189	if (goingaway) {
8190		bp->b_flags |= B_INVAL | B_NOCACHE;
8191		brelse(bp);
8192	}
8193	freedblocks = 0;
8194	if (level == 0)
8195		freedblocks = (nblocks * cnt);
8196	if (needj == 0)
8197		freedblocks += nblocks;
8198	freeblks_free(ump, freeblks, freedblocks);
8199	/*
8200	 * If we are journaling set up the ref counts and offset so this
8201	 * indirect can be completed when its children are free.
8202	 */
8203	if (needj) {
8204		ACQUIRE_LOCK(ump);
8205		freework->fw_off = i;
8206		freework->fw_ref += freedeps;
8207		freework->fw_ref -= NINDIR(fs) + 1;
8208		if (level == 0)
8209			freeblks->fb_cgwait += freedeps;
8210		if (freework->fw_ref == 0)
8211			freework_freeblock(freework);
8212		FREE_LOCK(ump);
8213		return;
8214	}
8215	/*
8216	 * If we're not journaling we can free the indirect now.
8217	 */
8218	dbn = dbtofsb(fs, dbn);
8219	CTR3(KTR_SUJ,
8220	    "indir_trunc 2: ino %d blkno %jd size %ld",
8221	    freeblks->fb_inum, dbn, fs->fs_bsize);
8222	ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8223	    freeblks->fb_inum, freeblks->fb_vtype, NULL);
8224	/* Non SUJ softdep does single-threaded truncations. */
8225	if (freework->fw_blkno == dbn) {
8226		freework->fw_state |= ALLCOMPLETE;
8227		ACQUIRE_LOCK(ump);
8228		handle_written_freework(freework);
8229		FREE_LOCK(ump);
8230	}
8231	return;
8232}
8233
8234/*
8235 * Cancel an allocindir when it is removed via truncation.  When bp is not
8236 * NULL the indirect never appeared on disk and is scheduled to be freed
8237 * independently of the indir so we can more easily track journal work.
8238 */
8239static void
8240cancel_allocindir(aip, bp, freeblks, trunc)
8241	struct allocindir *aip;
8242	struct buf *bp;
8243	struct freeblks *freeblks;
8244	int trunc;
8245{
8246	struct indirdep *indirdep;
8247	struct freefrag *freefrag;
8248	struct newblk *newblk;
8249
8250	newblk = (struct newblk *)aip;
8251	LIST_REMOVE(aip, ai_next);
8252	/*
8253	 * We must eliminate the pointer in bp if it must be freed on its
8254	 * own due to partial truncate or pending journal work.
8255	 */
8256	if (bp && (trunc || newblk->nb_jnewblk)) {
8257		/*
8258		 * Clear the pointer and mark the aip to be freed
8259		 * directly if it never existed on disk.
8260		 */
8261		aip->ai_state |= DELAYEDFREE;
8262		indirdep = aip->ai_indirdep;
8263		if (indirdep->ir_state & UFS1FMT)
8264			((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8265		else
8266			((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8267	}
8268	/*
8269	 * When truncating the previous pointer will be freed via
8270	 * savedbp.  Eliminate the freefrag which would dup free.
8271	 */
8272	if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8273		newblk->nb_freefrag = NULL;
8274		if (freefrag->ff_jdep)
8275			cancel_jfreefrag(
8276			    WK_JFREEFRAG(freefrag->ff_jdep));
8277		jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8278		WORKITEM_FREE(freefrag, D_FREEFRAG);
8279	}
8280	/*
8281	 * If the journal hasn't been written the jnewblk must be passed
8282	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
8283	 * this by leaving the journal dependency on the newblk to be freed
8284	 * when a freework is created in handle_workitem_freeblocks().
8285	 */
8286	cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8287	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8288}
8289
8290/*
8291 * Create the mkdir dependencies for . and .. in a new directory.  Link them
8292 * in to a newdirblk so any subsequent additions are tracked properly.  The
8293 * caller is responsible for adding the mkdir1 dependency to the journal
8294 * and updating id_mkdiradd.  This function returns with the per-filesystem
8295 * lock held.
8296 */
8297static struct mkdir *
8298setup_newdir(dap, newinum, dinum, newdirbp, mkdirp)
8299	struct diradd *dap;
8300	ino_t newinum;
8301	ino_t dinum;
8302	struct buf *newdirbp;
8303	struct mkdir **mkdirp;
8304{
8305	struct newblk *newblk;
8306	struct pagedep *pagedep;
8307	struct inodedep *inodedep;
8308	struct newdirblk *newdirblk;
8309	struct mkdir *mkdir1, *mkdir2;
8310	struct worklist *wk;
8311	struct jaddref *jaddref;
8312	struct ufsmount *ump;
8313	struct mount *mp;
8314
8315	mp = dap->da_list.wk_mp;
8316	ump = VFSTOUFS(mp);
8317	newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8318	    M_SOFTDEP_FLAGS);
8319	workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8320	LIST_INIT(&newdirblk->db_mkdir);
8321	mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8322	workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8323	mkdir1->md_state = ATTACHED | MKDIR_BODY;
8324	mkdir1->md_diradd = dap;
8325	mkdir1->md_jaddref = NULL;
8326	mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8327	workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8328	mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8329	mkdir2->md_diradd = dap;
8330	mkdir2->md_jaddref = NULL;
8331	if (MOUNTEDSUJ(mp) == 0) {
8332		mkdir1->md_state |= DEPCOMPLETE;
8333		mkdir2->md_state |= DEPCOMPLETE;
8334	}
8335	/*
8336	 * Dependency on "." and ".." being written to disk.
8337	 */
8338	mkdir1->md_buf = newdirbp;
8339	ACQUIRE_LOCK(VFSTOUFS(mp));
8340	LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs);
8341	/*
8342	 * We must link the pagedep, allocdirect, and newdirblk for
8343	 * the initial file page so the pointer to the new directory
8344	 * is not written until the directory contents are live and
8345	 * any subsequent additions are not marked live until the
8346	 * block is reachable via the inode.
8347	 */
8348	if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8349		panic("setup_newdir: lost pagedep");
8350	LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8351		if (wk->wk_type == D_ALLOCDIRECT)
8352			break;
8353	if (wk == NULL)
8354		panic("setup_newdir: lost allocdirect");
8355	if (pagedep->pd_state & NEWBLOCK)
8356		panic("setup_newdir: NEWBLOCK already set");
8357	newblk = WK_NEWBLK(wk);
8358	pagedep->pd_state |= NEWBLOCK;
8359	pagedep->pd_newdirblk = newdirblk;
8360	newdirblk->db_pagedep = pagedep;
8361	WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8362	WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8363	/*
8364	 * Look up the inodedep for the parent directory so that we
8365	 * can link mkdir2 into the pending dotdot jaddref or
8366	 * the inode write if there is none.  If the inode is
8367	 * ALLCOMPLETE and no jaddref is present all dependencies have
8368	 * been satisfied and mkdir2 can be freed.
8369	 */
8370	inodedep_lookup(mp, dinum, 0, &inodedep);
8371	if (MOUNTEDSUJ(mp)) {
8372		if (inodedep == NULL)
8373			panic("setup_newdir: Lost parent.");
8374		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8375		    inoreflst);
8376		KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8377		    (jaddref->ja_state & MKDIR_PARENT),
8378		    ("setup_newdir: bad dotdot jaddref %p", jaddref));
8379		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8380		mkdir2->md_jaddref = jaddref;
8381		jaddref->ja_mkdir = mkdir2;
8382	} else if (inodedep == NULL ||
8383	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8384		dap->da_state &= ~MKDIR_PARENT;
8385		WORKITEM_FREE(mkdir2, D_MKDIR);
8386		mkdir2 = NULL;
8387	} else {
8388		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8389		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8390	}
8391	*mkdirp = mkdir2;
8392
8393	return (mkdir1);
8394}
8395
8396/*
8397 * Directory entry addition dependencies.
8398 *
8399 * When adding a new directory entry, the inode (with its incremented link
8400 * count) must be written to disk before the directory entry's pointer to it.
8401 * Also, if the inode is newly allocated, the corresponding freemap must be
8402 * updated (on disk) before the directory entry's pointer. These requirements
8403 * are met via undo/redo on the directory entry's pointer, which consists
8404 * simply of the inode number.
8405 *
8406 * As directory entries are added and deleted, the free space within a
8407 * directory block can become fragmented.  The ufs filesystem will compact
8408 * a fragmented directory block to make space for a new entry. When this
8409 * occurs, the offsets of previously added entries change. Any "diradd"
8410 * dependency structures corresponding to these entries must be updated with
8411 * the new offsets.
8412 */
8413
8414/*
8415 * This routine is called after the in-memory inode's link
8416 * count has been incremented, but before the directory entry's
8417 * pointer to the inode has been set.
8418 */
8419int
8420softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
8421	struct buf *bp;		/* buffer containing directory block */
8422	struct inode *dp;	/* inode for directory */
8423	off_t diroffset;	/* offset of new entry in directory */
8424	ino_t newinum;		/* inode referenced by new directory entry */
8425	struct buf *newdirbp;	/* non-NULL => contents of new mkdir */
8426	int isnewblk;		/* entry is in a newly allocated block */
8427{
8428	int offset;		/* offset of new entry within directory block */
8429	ufs_lbn_t lbn;		/* block in directory containing new entry */
8430	struct fs *fs;
8431	struct diradd *dap;
8432	struct newblk *newblk;
8433	struct pagedep *pagedep;
8434	struct inodedep *inodedep;
8435	struct newdirblk *newdirblk;
8436	struct mkdir *mkdir1, *mkdir2;
8437	struct jaddref *jaddref;
8438	struct ufsmount *ump;
8439	struct mount *mp;
8440	int isindir;
8441
8442	ump = dp->i_ump;
8443	mp = UFSTOVFS(ump);
8444	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8445	    ("softdep_setup_directory_add called on non-softdep filesystem"));
8446	/*
8447	 * Whiteouts have no dependencies.
8448	 */
8449	if (newinum == WINO) {
8450		if (newdirbp != NULL)
8451			bdwrite(newdirbp);
8452		return (0);
8453	}
8454	jaddref = NULL;
8455	mkdir1 = mkdir2 = NULL;
8456	fs = dp->i_fs;
8457	lbn = lblkno(fs, diroffset);
8458	offset = blkoff(fs, diroffset);
8459	dap = malloc(sizeof(struct diradd), M_DIRADD,
8460		M_SOFTDEP_FLAGS|M_ZERO);
8461	workitem_alloc(&dap->da_list, D_DIRADD, mp);
8462	dap->da_offset = offset;
8463	dap->da_newinum = newinum;
8464	dap->da_state = ATTACHED;
8465	LIST_INIT(&dap->da_jwork);
8466	isindir = bp->b_lblkno >= NDADDR;
8467	newdirblk = NULL;
8468	if (isnewblk &&
8469	    (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8470		newdirblk = malloc(sizeof(struct newdirblk),
8471		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8472		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8473		LIST_INIT(&newdirblk->db_mkdir);
8474	}
8475	/*
8476	 * If we're creating a new directory setup the dependencies and set
8477	 * the dap state to wait for them.  Otherwise it's COMPLETE and
8478	 * we can move on.
8479	 */
8480	if (newdirbp == NULL) {
8481		dap->da_state |= DEPCOMPLETE;
8482		ACQUIRE_LOCK(ump);
8483	} else {
8484		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8485		mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8486		    &mkdir2);
8487	}
8488	/*
8489	 * Link into parent directory pagedep to await its being written.
8490	 */
8491	pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8492#ifdef DEBUG
8493	if (diradd_lookup(pagedep, offset) != NULL)
8494		panic("softdep_setup_directory_add: %p already at off %d\n",
8495		    diradd_lookup(pagedep, offset), offset);
8496#endif
8497	dap->da_pagedep = pagedep;
8498	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8499	    da_pdlist);
8500	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
8501	/*
8502	 * If we're journaling, link the diradd into the jaddref so it
8503	 * may be completed after the journal entry is written.  Otherwise,
8504	 * link the diradd into its inodedep.  If the inode is not yet
8505	 * written place it on the bufwait list, otherwise do the post-inode
8506	 * write processing to put it on the id_pendinghd list.
8507	 */
8508	if (MOUNTEDSUJ(mp)) {
8509		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8510		    inoreflst);
8511		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8512		    ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8513		jaddref->ja_diroff = diroffset;
8514		jaddref->ja_diradd = dap;
8515		add_to_journal(&jaddref->ja_list);
8516	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8517		diradd_inode_written(dap, inodedep);
8518	else
8519		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8520	/*
8521	 * Add the journal entries for . and .. links now that the primary
8522	 * link is written.
8523	 */
8524	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8525		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8526		    inoreflst, if_deps);
8527		KASSERT(jaddref != NULL &&
8528		    jaddref->ja_ino == jaddref->ja_parent &&
8529		    (jaddref->ja_state & MKDIR_BODY),
8530		    ("softdep_setup_directory_add: bad dot jaddref %p",
8531		    jaddref));
8532		mkdir1->md_jaddref = jaddref;
8533		jaddref->ja_mkdir = mkdir1;
8534		/*
8535		 * It is important that the dotdot journal entry
8536		 * is added prior to the dot entry since dot writes
8537		 * both the dot and dotdot links.  These both must
8538		 * be added after the primary link for the journal
8539		 * to remain consistent.
8540		 */
8541		add_to_journal(&mkdir2->md_jaddref->ja_list);
8542		add_to_journal(&jaddref->ja_list);
8543	}
8544	/*
8545	 * If we are adding a new directory remember this diradd so that if
8546	 * we rename it we can keep the dot and dotdot dependencies.  If
8547	 * we are adding a new name for an inode that has a mkdiradd we
8548	 * must be in rename and we have to move the dot and dotdot
8549	 * dependencies to this new name.  The old name is being orphaned
8550	 * soon.
8551	 */
8552	if (mkdir1 != NULL) {
8553		if (inodedep->id_mkdiradd != NULL)
8554			panic("softdep_setup_directory_add: Existing mkdir");
8555		inodedep->id_mkdiradd = dap;
8556	} else if (inodedep->id_mkdiradd)
8557		merge_diradd(inodedep, dap);
8558	if (newdirblk != NULL) {
8559		/*
8560		 * There is nothing to do if we are already tracking
8561		 * this block.
8562		 */
8563		if ((pagedep->pd_state & NEWBLOCK) != 0) {
8564			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8565			FREE_LOCK(ump);
8566			return (0);
8567		}
8568		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8569		    == 0)
8570			panic("softdep_setup_directory_add: lost entry");
8571		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8572		pagedep->pd_state |= NEWBLOCK;
8573		pagedep->pd_newdirblk = newdirblk;
8574		newdirblk->db_pagedep = pagedep;
8575		FREE_LOCK(ump);
8576		/*
8577		 * If we extended into an indirect signal direnter to sync.
8578		 */
8579		if (isindir)
8580			return (1);
8581		return (0);
8582	}
8583	FREE_LOCK(ump);
8584	return (0);
8585}
8586
8587/*
8588 * This procedure is called to change the offset of a directory
8589 * entry when compacting a directory block which must be owned
8590 * exclusively by the caller. Note that the actual entry movement
8591 * must be done in this procedure to ensure that no I/O completions
8592 * occur while the move is in progress.
8593 */
8594void
8595softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
8596	struct buf *bp;		/* Buffer holding directory block. */
8597	struct inode *dp;	/* inode for directory */
8598	caddr_t base;		/* address of dp->i_offset */
8599	caddr_t oldloc;		/* address of old directory location */
8600	caddr_t newloc;		/* address of new directory location */
8601	int entrysize;		/* size of directory entry */
8602{
8603	int offset, oldoffset, newoffset;
8604	struct pagedep *pagedep;
8605	struct jmvref *jmvref;
8606	struct diradd *dap;
8607	struct direct *de;
8608	struct mount *mp;
8609	ufs_lbn_t lbn;
8610	int flags;
8611
8612	mp = UFSTOVFS(dp->i_ump);
8613	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8614	    ("softdep_change_directoryentry_offset called on "
8615	     "non-softdep filesystem"));
8616	de = (struct direct *)oldloc;
8617	jmvref = NULL;
8618	flags = 0;
8619	/*
8620	 * Moves are always journaled as it would be too complex to
8621	 * determine if any affected adds or removes are present in the
8622	 * journal.
8623	 */
8624	if (MOUNTEDSUJ(mp)) {
8625		flags = DEPALLOC;
8626		jmvref = newjmvref(dp, de->d_ino,
8627		    dp->i_offset + (oldloc - base),
8628		    dp->i_offset + (newloc - base));
8629	}
8630	lbn = lblkno(dp->i_fs, dp->i_offset);
8631	offset = blkoff(dp->i_fs, dp->i_offset);
8632	oldoffset = offset + (oldloc - base);
8633	newoffset = offset + (newloc - base);
8634	ACQUIRE_LOCK(dp->i_ump);
8635	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8636		goto done;
8637	dap = diradd_lookup(pagedep, oldoffset);
8638	if (dap) {
8639		dap->da_offset = newoffset;
8640		newoffset = DIRADDHASH(newoffset);
8641		oldoffset = DIRADDHASH(oldoffset);
8642		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8643		    newoffset != oldoffset) {
8644			LIST_REMOVE(dap, da_pdlist);
8645			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8646			    dap, da_pdlist);
8647		}
8648	}
8649done:
8650	if (jmvref) {
8651		jmvref->jm_pagedep = pagedep;
8652		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8653		add_to_journal(&jmvref->jm_list);
8654	}
8655	bcopy(oldloc, newloc, entrysize);
8656	FREE_LOCK(dp->i_ump);
8657}
8658
8659/*
8660 * Move the mkdir dependencies and journal work from one diradd to another
8661 * when renaming a directory.  The new name must depend on the mkdir deps
8662 * completing as the old name did.  Directories can only have one valid link
8663 * at a time so one must be canonical.
8664 */
8665static void
8666merge_diradd(inodedep, newdap)
8667	struct inodedep *inodedep;
8668	struct diradd *newdap;
8669{
8670	struct diradd *olddap;
8671	struct mkdir *mkdir, *nextmd;
8672	struct ufsmount *ump;
8673	short state;
8674
8675	olddap = inodedep->id_mkdiradd;
8676	inodedep->id_mkdiradd = newdap;
8677	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8678		newdap->da_state &= ~DEPCOMPLETE;
8679		ump = VFSTOUFS(inodedep->id_list.wk_mp);
8680		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8681		     mkdir = nextmd) {
8682			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8683			if (mkdir->md_diradd != olddap)
8684				continue;
8685			mkdir->md_diradd = newdap;
8686			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8687			newdap->da_state |= state;
8688			olddap->da_state &= ~state;
8689			if ((olddap->da_state &
8690			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8691				break;
8692		}
8693		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8694			panic("merge_diradd: unfound ref");
8695	}
8696	/*
8697	 * Any mkdir related journal items are not safe to be freed until
8698	 * the new name is stable.
8699	 */
8700	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8701	olddap->da_state |= DEPCOMPLETE;
8702	complete_diradd(olddap);
8703}
8704
8705/*
8706 * Move the diradd to the pending list when all diradd dependencies are
8707 * complete.
8708 */
8709static void
8710complete_diradd(dap)
8711	struct diradd *dap;
8712{
8713	struct pagedep *pagedep;
8714
8715	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8716		if (dap->da_state & DIRCHG)
8717			pagedep = dap->da_previous->dm_pagedep;
8718		else
8719			pagedep = dap->da_pagedep;
8720		LIST_REMOVE(dap, da_pdlist);
8721		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8722	}
8723}
8724
8725/*
8726 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
8727 * add entries and conditonally journal the remove.
8728 */
8729static void
8730cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8731	struct diradd *dap;
8732	struct dirrem *dirrem;
8733	struct jremref *jremref;
8734	struct jremref *dotremref;
8735	struct jremref *dotdotremref;
8736{
8737	struct inodedep *inodedep;
8738	struct jaddref *jaddref;
8739	struct inoref *inoref;
8740	struct ufsmount *ump;
8741	struct mkdir *mkdir;
8742
8743	/*
8744	 * If no remove references were allocated we're on a non-journaled
8745	 * filesystem and can skip the cancel step.
8746	 */
8747	if (jremref == NULL) {
8748		free_diradd(dap, NULL);
8749		return;
8750	}
8751	/*
8752	 * Cancel the primary name an free it if it does not require
8753	 * journaling.
8754	 */
8755	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8756	    0, &inodedep) != 0) {
8757		/* Abort the addref that reference this diradd.  */
8758		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8759			if (inoref->if_list.wk_type != D_JADDREF)
8760				continue;
8761			jaddref = (struct jaddref *)inoref;
8762			if (jaddref->ja_diradd != dap)
8763				continue;
8764			if (cancel_jaddref(jaddref, inodedep,
8765			    &dirrem->dm_jwork) == 0) {
8766				free_jremref(jremref);
8767				jremref = NULL;
8768			}
8769			break;
8770		}
8771	}
8772	/*
8773	 * Cancel subordinate names and free them if they do not require
8774	 * journaling.
8775	 */
8776	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8777		ump = VFSTOUFS(dap->da_list.wk_mp);
8778		LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
8779			if (mkdir->md_diradd != dap)
8780				continue;
8781			if ((jaddref = mkdir->md_jaddref) == NULL)
8782				continue;
8783			mkdir->md_jaddref = NULL;
8784			if (mkdir->md_state & MKDIR_PARENT) {
8785				if (cancel_jaddref(jaddref, NULL,
8786				    &dirrem->dm_jwork) == 0) {
8787					free_jremref(dotdotremref);
8788					dotdotremref = NULL;
8789				}
8790			} else {
8791				if (cancel_jaddref(jaddref, inodedep,
8792				    &dirrem->dm_jwork) == 0) {
8793					free_jremref(dotremref);
8794					dotremref = NULL;
8795				}
8796			}
8797		}
8798	}
8799
8800	if (jremref)
8801		journal_jremref(dirrem, jremref, inodedep);
8802	if (dotremref)
8803		journal_jremref(dirrem, dotremref, inodedep);
8804	if (dotdotremref)
8805		journal_jremref(dirrem, dotdotremref, NULL);
8806	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8807	free_diradd(dap, &dirrem->dm_jwork);
8808}
8809
8810/*
8811 * Free a diradd dependency structure. This routine must be called
8812 * with splbio interrupts blocked.
8813 */
8814static void
8815free_diradd(dap, wkhd)
8816	struct diradd *dap;
8817	struct workhead *wkhd;
8818{
8819	struct dirrem *dirrem;
8820	struct pagedep *pagedep;
8821	struct inodedep *inodedep;
8822	struct mkdir *mkdir, *nextmd;
8823	struct ufsmount *ump;
8824
8825	ump = VFSTOUFS(dap->da_list.wk_mp);
8826	LOCK_OWNED(ump);
8827	LIST_REMOVE(dap, da_pdlist);
8828	if (dap->da_state & ONWORKLIST)
8829		WORKLIST_REMOVE(&dap->da_list);
8830	if ((dap->da_state & DIRCHG) == 0) {
8831		pagedep = dap->da_pagedep;
8832	} else {
8833		dirrem = dap->da_previous;
8834		pagedep = dirrem->dm_pagedep;
8835		dirrem->dm_dirinum = pagedep->pd_ino;
8836		dirrem->dm_state |= COMPLETE;
8837		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
8838			add_to_worklist(&dirrem->dm_list, 0);
8839	}
8840	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
8841	    0, &inodedep) != 0)
8842		if (inodedep->id_mkdiradd == dap)
8843			inodedep->id_mkdiradd = NULL;
8844	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8845		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8846		     mkdir = nextmd) {
8847			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8848			if (mkdir->md_diradd != dap)
8849				continue;
8850			dap->da_state &=
8851			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
8852			LIST_REMOVE(mkdir, md_mkdirs);
8853			if (mkdir->md_state & ONWORKLIST)
8854				WORKLIST_REMOVE(&mkdir->md_list);
8855			if (mkdir->md_jaddref != NULL)
8856				panic("free_diradd: Unexpected jaddref");
8857			WORKITEM_FREE(mkdir, D_MKDIR);
8858			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
8859				break;
8860		}
8861		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8862			panic("free_diradd: unfound ref");
8863	}
8864	if (inodedep)
8865		free_inodedep(inodedep);
8866	/*
8867	 * Free any journal segments waiting for the directory write.
8868	 */
8869	handle_jwork(&dap->da_jwork);
8870	WORKITEM_FREE(dap, D_DIRADD);
8871}
8872
8873/*
8874 * Directory entry removal dependencies.
8875 *
8876 * When removing a directory entry, the entry's inode pointer must be
8877 * zero'ed on disk before the corresponding inode's link count is decremented
8878 * (possibly freeing the inode for re-use). This dependency is handled by
8879 * updating the directory entry but delaying the inode count reduction until
8880 * after the directory block has been written to disk. After this point, the
8881 * inode count can be decremented whenever it is convenient.
8882 */
8883
8884/*
8885 * This routine should be called immediately after removing
8886 * a directory entry.  The inode's link count should not be
8887 * decremented by the calling procedure -- the soft updates
8888 * code will do this task when it is safe.
8889 */
8890void
8891softdep_setup_remove(bp, dp, ip, isrmdir)
8892	struct buf *bp;		/* buffer containing directory block */
8893	struct inode *dp;	/* inode for the directory being modified */
8894	struct inode *ip;	/* inode for directory entry being removed */
8895	int isrmdir;		/* indicates if doing RMDIR */
8896{
8897	struct dirrem *dirrem, *prevdirrem;
8898	struct inodedep *inodedep;
8899	int direct;
8900
8901	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
8902	    ("softdep_setup_remove called on non-softdep filesystem"));
8903	/*
8904	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
8905	 * newdirrem() to setup the full directory remove which requires
8906	 * isrmdir > 1.
8907	 */
8908	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
8909	/*
8910	 * Add the dirrem to the inodedep's pending remove list for quick
8911	 * discovery later.
8912	 */
8913	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8914	    &inodedep) == 0)
8915		panic("softdep_setup_remove: Lost inodedep.");
8916	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
8917	dirrem->dm_state |= ONDEPLIST;
8918	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
8919
8920	/*
8921	 * If the COMPLETE flag is clear, then there were no active
8922	 * entries and we want to roll back to a zeroed entry until
8923	 * the new inode is committed to disk. If the COMPLETE flag is
8924	 * set then we have deleted an entry that never made it to
8925	 * disk. If the entry we deleted resulted from a name change,
8926	 * then the old name still resides on disk. We cannot delete
8927	 * its inode (returned to us in prevdirrem) until the zeroed
8928	 * directory entry gets to disk. The new inode has never been
8929	 * referenced on the disk, so can be deleted immediately.
8930	 */
8931	if ((dirrem->dm_state & COMPLETE) == 0) {
8932		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
8933		    dm_next);
8934		FREE_LOCK(ip->i_ump);
8935	} else {
8936		if (prevdirrem != NULL)
8937			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
8938			    prevdirrem, dm_next);
8939		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
8940		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
8941		FREE_LOCK(ip->i_ump);
8942		if (direct)
8943			handle_workitem_remove(dirrem, 0);
8944	}
8945}
8946
8947/*
8948 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the
8949 * pd_pendinghd list of a pagedep.
8950 */
8951static struct diradd *
8952diradd_lookup(pagedep, offset)
8953	struct pagedep *pagedep;
8954	int offset;
8955{
8956	struct diradd *dap;
8957
8958	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
8959		if (dap->da_offset == offset)
8960			return (dap);
8961	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
8962		if (dap->da_offset == offset)
8963			return (dap);
8964	return (NULL);
8965}
8966
8967/*
8968 * Search for a .. diradd dependency in a directory that is being removed.
8969 * If the directory was renamed to a new parent we have a diradd rather
8970 * than a mkdir for the .. entry.  We need to cancel it now before
8971 * it is found in truncate().
8972 */
8973static struct jremref *
8974cancel_diradd_dotdot(ip, dirrem, jremref)
8975	struct inode *ip;
8976	struct dirrem *dirrem;
8977	struct jremref *jremref;
8978{
8979	struct pagedep *pagedep;
8980	struct diradd *dap;
8981	struct worklist *wk;
8982
8983	if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
8984	    &pagedep) == 0)
8985		return (jremref);
8986	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
8987	if (dap == NULL)
8988		return (jremref);
8989	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
8990	/*
8991	 * Mark any journal work as belonging to the parent so it is freed
8992	 * with the .. reference.
8993	 */
8994	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8995		wk->wk_state |= MKDIR_PARENT;
8996	return (NULL);
8997}
8998
8999/*
9000 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
9001 * replace it with a dirrem/diradd pair as a result of re-parenting a
9002 * directory.  This ensures that we don't simultaneously have a mkdir and
9003 * a diradd for the same .. entry.
9004 */
9005static struct jremref *
9006cancel_mkdir_dotdot(ip, dirrem, jremref)
9007	struct inode *ip;
9008	struct dirrem *dirrem;
9009	struct jremref *jremref;
9010{
9011	struct inodedep *inodedep;
9012	struct jaddref *jaddref;
9013	struct ufsmount *ump;
9014	struct mkdir *mkdir;
9015	struct diradd *dap;
9016
9017	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
9018	    &inodedep) == 0)
9019		return (jremref);
9020	dap = inodedep->id_mkdiradd;
9021	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
9022		return (jremref);
9023	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9024	for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9025	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
9026		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
9027			break;
9028	if (mkdir == NULL)
9029		panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
9030	if ((jaddref = mkdir->md_jaddref) != NULL) {
9031		mkdir->md_jaddref = NULL;
9032		jaddref->ja_state &= ~MKDIR_PARENT;
9033		if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
9034		    &inodedep) == 0)
9035			panic("cancel_mkdir_dotdot: Lost parent inodedep");
9036		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
9037			journal_jremref(dirrem, jremref, inodedep);
9038			jremref = NULL;
9039		}
9040	}
9041	if (mkdir->md_state & ONWORKLIST)
9042		WORKLIST_REMOVE(&mkdir->md_list);
9043	mkdir->md_state |= ALLCOMPLETE;
9044	complete_mkdir(mkdir);
9045	return (jremref);
9046}
9047
9048static void
9049journal_jremref(dirrem, jremref, inodedep)
9050	struct dirrem *dirrem;
9051	struct jremref *jremref;
9052	struct inodedep *inodedep;
9053{
9054
9055	if (inodedep == NULL)
9056		if (inodedep_lookup(jremref->jr_list.wk_mp,
9057		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
9058			panic("journal_jremref: Lost inodedep");
9059	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
9060	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
9061	add_to_journal(&jremref->jr_list);
9062}
9063
9064static void
9065dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
9066	struct dirrem *dirrem;
9067	struct jremref *jremref;
9068	struct jremref *dotremref;
9069	struct jremref *dotdotremref;
9070{
9071	struct inodedep *inodedep;
9072
9073
9074	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
9075	    &inodedep) == 0)
9076		panic("dirrem_journal: Lost inodedep");
9077	journal_jremref(dirrem, jremref, inodedep);
9078	if (dotremref)
9079		journal_jremref(dirrem, dotremref, inodedep);
9080	if (dotdotremref)
9081		journal_jremref(dirrem, dotdotremref, NULL);
9082}
9083
9084/*
9085 * Allocate a new dirrem if appropriate and return it along with
9086 * its associated pagedep. Called without a lock, returns with lock.
9087 */
9088static struct dirrem *
9089newdirrem(bp, dp, ip, isrmdir, prevdirremp)
9090	struct buf *bp;		/* buffer containing directory block */
9091	struct inode *dp;	/* inode for the directory being modified */
9092	struct inode *ip;	/* inode for directory entry being removed */
9093	int isrmdir;		/* indicates if doing RMDIR */
9094	struct dirrem **prevdirremp; /* previously referenced inode, if any */
9095{
9096	int offset;
9097	ufs_lbn_t lbn;
9098	struct diradd *dap;
9099	struct dirrem *dirrem;
9100	struct pagedep *pagedep;
9101	struct jremref *jremref;
9102	struct jremref *dotremref;
9103	struct jremref *dotdotremref;
9104	struct vnode *dvp;
9105
9106	/*
9107	 * Whiteouts have no deletion dependencies.
9108	 */
9109	if (ip == NULL)
9110		panic("newdirrem: whiteout");
9111	dvp = ITOV(dp);
9112	/*
9113	 * If the system is over its limit and our filesystem is
9114	 * responsible for more than our share of that usage and
9115	 * we are not a snapshot, request some inodedep cleanup.
9116	 * Limiting the number of dirrem structures will also limit
9117	 * the number of freefile and freeblks structures.
9118	 */
9119	ACQUIRE_LOCK(ip->i_ump);
9120	if (!IS_SNAPSHOT(ip) && softdep_excess_items(ip->i_ump, D_DIRREM))
9121		schedule_cleanup(ITOV(dp)->v_mount);
9122	else
9123		FREE_LOCK(ip->i_ump);
9124	dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS |
9125	    M_ZERO);
9126	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
9127	LIST_INIT(&dirrem->dm_jremrefhd);
9128	LIST_INIT(&dirrem->dm_jwork);
9129	dirrem->dm_state = isrmdir ? RMDIR : 0;
9130	dirrem->dm_oldinum = ip->i_number;
9131	*prevdirremp = NULL;
9132	/*
9133	 * Allocate remove reference structures to track journal write
9134	 * dependencies.  We will always have one for the link and
9135	 * when doing directories we will always have one more for dot.
9136	 * When renaming a directory we skip the dotdot link change so
9137	 * this is not needed.
9138	 */
9139	jremref = dotremref = dotdotremref = NULL;
9140	if (DOINGSUJ(dvp)) {
9141		if (isrmdir) {
9142			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
9143			    ip->i_effnlink + 2);
9144			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
9145			    ip->i_effnlink + 1);
9146			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
9147			    dp->i_effnlink + 1);
9148			dotdotremref->jr_state |= MKDIR_PARENT;
9149		} else
9150			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
9151			    ip->i_effnlink + 1);
9152	}
9153	ACQUIRE_LOCK(ip->i_ump);
9154	lbn = lblkno(dp->i_fs, dp->i_offset);
9155	offset = blkoff(dp->i_fs, dp->i_offset);
9156	pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
9157	    &pagedep);
9158	dirrem->dm_pagedep = pagedep;
9159	dirrem->dm_offset = offset;
9160	/*
9161	 * If we're renaming a .. link to a new directory, cancel any
9162	 * existing MKDIR_PARENT mkdir.  If it has already been canceled
9163	 * the jremref is preserved for any potential diradd in this
9164	 * location.  This can not coincide with a rmdir.
9165	 */
9166	if (dp->i_offset == DOTDOT_OFFSET) {
9167		if (isrmdir)
9168			panic("newdirrem: .. directory change during remove?");
9169		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
9170	}
9171	/*
9172	 * If we're removing a directory search for the .. dependency now and
9173	 * cancel it.  Any pending journal work will be added to the dirrem
9174	 * to be completed when the workitem remove completes.
9175	 */
9176	if (isrmdir)
9177		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
9178	/*
9179	 * Check for a diradd dependency for the same directory entry.
9180	 * If present, then both dependencies become obsolete and can
9181	 * be de-allocated.
9182	 */
9183	dap = diradd_lookup(pagedep, offset);
9184	if (dap == NULL) {
9185		/*
9186		 * Link the jremref structures into the dirrem so they are
9187		 * written prior to the pagedep.
9188		 */
9189		if (jremref)
9190			dirrem_journal(dirrem, jremref, dotremref,
9191			    dotdotremref);
9192		return (dirrem);
9193	}
9194	/*
9195	 * Must be ATTACHED at this point.
9196	 */
9197	if ((dap->da_state & ATTACHED) == 0)
9198		panic("newdirrem: not ATTACHED");
9199	if (dap->da_newinum != ip->i_number)
9200		panic("newdirrem: inum %ju should be %ju",
9201		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
9202	/*
9203	 * If we are deleting a changed name that never made it to disk,
9204	 * then return the dirrem describing the previous inode (which
9205	 * represents the inode currently referenced from this entry on disk).
9206	 */
9207	if ((dap->da_state & DIRCHG) != 0) {
9208		*prevdirremp = dap->da_previous;
9209		dap->da_state &= ~DIRCHG;
9210		dap->da_pagedep = pagedep;
9211	}
9212	/*
9213	 * We are deleting an entry that never made it to disk.
9214	 * Mark it COMPLETE so we can delete its inode immediately.
9215	 */
9216	dirrem->dm_state |= COMPLETE;
9217	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
9218#ifdef SUJ_DEBUG
9219	if (isrmdir == 0) {
9220		struct worklist *wk;
9221
9222		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9223			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9224				panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
9225	}
9226#endif
9227
9228	return (dirrem);
9229}
9230
9231/*
9232 * Directory entry change dependencies.
9233 *
9234 * Changing an existing directory entry requires that an add operation
9235 * be completed first followed by a deletion. The semantics for the addition
9236 * are identical to the description of adding a new entry above except
9237 * that the rollback is to the old inode number rather than zero. Once
9238 * the addition dependency is completed, the removal is done as described
9239 * in the removal routine above.
9240 */
9241
9242/*
9243 * This routine should be called immediately after changing
9244 * a directory entry.  The inode's link count should not be
9245 * decremented by the calling procedure -- the soft updates
9246 * code will perform this task when it is safe.
9247 */
9248void
9249softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
9250	struct buf *bp;		/* buffer containing directory block */
9251	struct inode *dp;	/* inode for the directory being modified */
9252	struct inode *ip;	/* inode for directory entry being removed */
9253	ino_t newinum;		/* new inode number for changed entry */
9254	int isrmdir;		/* indicates if doing RMDIR */
9255{
9256	int offset;
9257	struct diradd *dap = NULL;
9258	struct dirrem *dirrem, *prevdirrem;
9259	struct pagedep *pagedep;
9260	struct inodedep *inodedep;
9261	struct jaddref *jaddref;
9262	struct mount *mp;
9263
9264	offset = blkoff(dp->i_fs, dp->i_offset);
9265	mp = UFSTOVFS(dp->i_ump);
9266	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
9267	   ("softdep_setup_directory_change called on non-softdep filesystem"));
9268
9269	/*
9270	 * Whiteouts do not need diradd dependencies.
9271	 */
9272	if (newinum != WINO) {
9273		dap = malloc(sizeof(struct diradd),
9274		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9275		workitem_alloc(&dap->da_list, D_DIRADD, mp);
9276		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9277		dap->da_offset = offset;
9278		dap->da_newinum = newinum;
9279		LIST_INIT(&dap->da_jwork);
9280	}
9281
9282	/*
9283	 * Allocate a new dirrem and ACQUIRE_LOCK.
9284	 */
9285	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9286	pagedep = dirrem->dm_pagedep;
9287	/*
9288	 * The possible values for isrmdir:
9289	 *	0 - non-directory file rename
9290	 *	1 - directory rename within same directory
9291	 *   inum - directory rename to new directory of given inode number
9292	 * When renaming to a new directory, we are both deleting and
9293	 * creating a new directory entry, so the link count on the new
9294	 * directory should not change. Thus we do not need the followup
9295	 * dirrem which is usually done in handle_workitem_remove. We set
9296	 * the DIRCHG flag to tell handle_workitem_remove to skip the
9297	 * followup dirrem.
9298	 */
9299	if (isrmdir > 1)
9300		dirrem->dm_state |= DIRCHG;
9301
9302	/*
9303	 * Whiteouts have no additional dependencies,
9304	 * so just put the dirrem on the correct list.
9305	 */
9306	if (newinum == WINO) {
9307		if ((dirrem->dm_state & COMPLETE) == 0) {
9308			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9309			    dm_next);
9310		} else {
9311			dirrem->dm_dirinum = pagedep->pd_ino;
9312			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9313				add_to_worklist(&dirrem->dm_list, 0);
9314		}
9315		FREE_LOCK(dp->i_ump);
9316		return;
9317	}
9318	/*
9319	 * Add the dirrem to the inodedep's pending remove list for quick
9320	 * discovery later.  A valid nlinkdelta ensures that this lookup
9321	 * will not fail.
9322	 */
9323	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9324		panic("softdep_setup_directory_change: Lost inodedep.");
9325	dirrem->dm_state |= ONDEPLIST;
9326	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9327
9328	/*
9329	 * If the COMPLETE flag is clear, then there were no active
9330	 * entries and we want to roll back to the previous inode until
9331	 * the new inode is committed to disk. If the COMPLETE flag is
9332	 * set, then we have deleted an entry that never made it to disk.
9333	 * If the entry we deleted resulted from a name change, then the old
9334	 * inode reference still resides on disk. Any rollback that we do
9335	 * needs to be to that old inode (returned to us in prevdirrem). If
9336	 * the entry we deleted resulted from a create, then there is
9337	 * no entry on the disk, so we want to roll back to zero rather
9338	 * than the uncommitted inode. In either of the COMPLETE cases we
9339	 * want to immediately free the unwritten and unreferenced inode.
9340	 */
9341	if ((dirrem->dm_state & COMPLETE) == 0) {
9342		dap->da_previous = dirrem;
9343	} else {
9344		if (prevdirrem != NULL) {
9345			dap->da_previous = prevdirrem;
9346		} else {
9347			dap->da_state &= ~DIRCHG;
9348			dap->da_pagedep = pagedep;
9349		}
9350		dirrem->dm_dirinum = pagedep->pd_ino;
9351		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9352			add_to_worklist(&dirrem->dm_list, 0);
9353	}
9354	/*
9355	 * Lookup the jaddref for this journal entry.  We must finish
9356	 * initializing it and make the diradd write dependent on it.
9357	 * If we're not journaling, put it on the id_bufwait list if the
9358	 * inode is not yet written. If it is written, do the post-inode
9359	 * write processing to put it on the id_pendinghd list.
9360	 */
9361	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
9362	if (MOUNTEDSUJ(mp)) {
9363		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9364		    inoreflst);
9365		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9366		    ("softdep_setup_directory_change: bad jaddref %p",
9367		    jaddref));
9368		jaddref->ja_diroff = dp->i_offset;
9369		jaddref->ja_diradd = dap;
9370		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9371		    dap, da_pdlist);
9372		add_to_journal(&jaddref->ja_list);
9373	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9374		dap->da_state |= COMPLETE;
9375		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9376		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9377	} else {
9378		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9379		    dap, da_pdlist);
9380		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9381	}
9382	/*
9383	 * If we're making a new name for a directory that has not been
9384	 * committed when need to move the dot and dotdot references to
9385	 * this new name.
9386	 */
9387	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
9388		merge_diradd(inodedep, dap);
9389	FREE_LOCK(dp->i_ump);
9390}
9391
9392/*
9393 * Called whenever the link count on an inode is changed.
9394 * It creates an inode dependency so that the new reference(s)
9395 * to the inode cannot be committed to disk until the updated
9396 * inode has been written.
9397 */
9398void
9399softdep_change_linkcnt(ip)
9400	struct inode *ip;	/* the inode with the increased link count */
9401{
9402	struct inodedep *inodedep;
9403
9404	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
9405	    ("softdep_change_linkcnt called on non-softdep filesystem"));
9406	ACQUIRE_LOCK(ip->i_ump);
9407	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, DEPALLOC,
9408	    &inodedep);
9409	if (ip->i_nlink < ip->i_effnlink)
9410		panic("softdep_change_linkcnt: bad delta");
9411	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9412	FREE_LOCK(ip->i_ump);
9413}
9414
9415/*
9416 * Attach a sbdep dependency to the superblock buf so that we can keep
9417 * track of the head of the linked list of referenced but unlinked inodes.
9418 */
9419void
9420softdep_setup_sbupdate(ump, fs, bp)
9421	struct ufsmount *ump;
9422	struct fs *fs;
9423	struct buf *bp;
9424{
9425	struct sbdep *sbdep;
9426	struct worklist *wk;
9427
9428	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9429	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
9430	LIST_FOREACH(wk, &bp->b_dep, wk_list)
9431		if (wk->wk_type == D_SBDEP)
9432			break;
9433	if (wk != NULL)
9434		return;
9435	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9436	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9437	sbdep->sb_fs = fs;
9438	sbdep->sb_ump = ump;
9439	ACQUIRE_LOCK(ump);
9440	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9441	FREE_LOCK(ump);
9442}
9443
9444/*
9445 * Return the first unlinked inodedep which is ready to be the head of the
9446 * list.  The inodedep and all those after it must have valid next pointers.
9447 */
9448static struct inodedep *
9449first_unlinked_inodedep(ump)
9450	struct ufsmount *ump;
9451{
9452	struct inodedep *inodedep;
9453	struct inodedep *idp;
9454
9455	LOCK_OWNED(ump);
9456	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9457	    inodedep; inodedep = idp) {
9458		if ((inodedep->id_state & UNLINKNEXT) == 0)
9459			return (NULL);
9460		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9461		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9462			break;
9463		if ((inodedep->id_state & UNLINKPREV) == 0)
9464			break;
9465	}
9466	return (inodedep);
9467}
9468
9469/*
9470 * Set the sujfree unlinked head pointer prior to writing a superblock.
9471 */
9472static void
9473initiate_write_sbdep(sbdep)
9474	struct sbdep *sbdep;
9475{
9476	struct inodedep *inodedep;
9477	struct fs *bpfs;
9478	struct fs *fs;
9479
9480	bpfs = sbdep->sb_fs;
9481	fs = sbdep->sb_ump->um_fs;
9482	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9483	if (inodedep) {
9484		fs->fs_sujfree = inodedep->id_ino;
9485		inodedep->id_state |= UNLINKPREV;
9486	} else
9487		fs->fs_sujfree = 0;
9488	bpfs->fs_sujfree = fs->fs_sujfree;
9489}
9490
9491/*
9492 * After a superblock is written determine whether it must be written again
9493 * due to a changing unlinked list head.
9494 */
9495static int
9496handle_written_sbdep(sbdep, bp)
9497	struct sbdep *sbdep;
9498	struct buf *bp;
9499{
9500	struct inodedep *inodedep;
9501	struct fs *fs;
9502
9503	LOCK_OWNED(sbdep->sb_ump);
9504	fs = sbdep->sb_fs;
9505	/*
9506	 * If the superblock doesn't match the in-memory list start over.
9507	 */
9508	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9509	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9510	    (inodedep == NULL && fs->fs_sujfree != 0)) {
9511		bdirty(bp);
9512		return (1);
9513	}
9514	WORKITEM_FREE(sbdep, D_SBDEP);
9515	if (fs->fs_sujfree == 0)
9516		return (0);
9517	/*
9518	 * Now that we have a record of this inode in stable store allow it
9519	 * to be written to free up pending work.  Inodes may see a lot of
9520	 * write activity after they are unlinked which we must not hold up.
9521	 */
9522	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9523		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9524			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9525			    inodedep, inodedep->id_state);
9526		if (inodedep->id_state & UNLINKONLIST)
9527			break;
9528		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9529	}
9530
9531	return (0);
9532}
9533
9534/*
9535 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9536 */
9537static void
9538unlinked_inodedep(mp, inodedep)
9539	struct mount *mp;
9540	struct inodedep *inodedep;
9541{
9542	struct ufsmount *ump;
9543
9544	ump = VFSTOUFS(mp);
9545	LOCK_OWNED(ump);
9546	if (MOUNTEDSUJ(mp) == 0)
9547		return;
9548	ump->um_fs->fs_fmod = 1;
9549	if (inodedep->id_state & UNLINKED)
9550		panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9551	inodedep->id_state |= UNLINKED;
9552	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9553}
9554
9555/*
9556 * Remove an inodedep from the unlinked inodedep list.  This may require
9557 * disk writes if the inode has made it that far.
9558 */
9559static void
9560clear_unlinked_inodedep(inodedep)
9561	struct inodedep *inodedep;
9562{
9563	struct ufsmount *ump;
9564	struct inodedep *idp;
9565	struct inodedep *idn;
9566	struct fs *fs;
9567	struct buf *bp;
9568	ino_t ino;
9569	ino_t nino;
9570	ino_t pino;
9571	int error;
9572
9573	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9574	fs = ump->um_fs;
9575	ino = inodedep->id_ino;
9576	error = 0;
9577	for (;;) {
9578		LOCK_OWNED(ump);
9579		KASSERT((inodedep->id_state & UNLINKED) != 0,
9580		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9581		    inodedep));
9582		/*
9583		 * If nothing has yet been written simply remove us from
9584		 * the in memory list and return.  This is the most common
9585		 * case where handle_workitem_remove() loses the final
9586		 * reference.
9587		 */
9588		if ((inodedep->id_state & UNLINKLINKS) == 0)
9589			break;
9590		/*
9591		 * If we have a NEXT pointer and no PREV pointer we can simply
9592		 * clear NEXT's PREV and remove ourselves from the list.  Be
9593		 * careful not to clear PREV if the superblock points at
9594		 * next as well.
9595		 */
9596		idn = TAILQ_NEXT(inodedep, id_unlinked);
9597		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9598			if (idn && fs->fs_sujfree != idn->id_ino)
9599				idn->id_state &= ~UNLINKPREV;
9600			break;
9601		}
9602		/*
9603		 * Here we have an inodedep which is actually linked into
9604		 * the list.  We must remove it by forcing a write to the
9605		 * link before us, whether it be the superblock or an inode.
9606		 * Unfortunately the list may change while we're waiting
9607		 * on the buf lock for either resource so we must loop until
9608		 * we lock the right one.  If both the superblock and an
9609		 * inode point to this inode we must clear the inode first
9610		 * followed by the superblock.
9611		 */
9612		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9613		pino = 0;
9614		if (idp && (idp->id_state & UNLINKNEXT))
9615			pino = idp->id_ino;
9616		FREE_LOCK(ump);
9617		if (pino == 0) {
9618			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9619			    (int)fs->fs_sbsize, 0, 0, 0);
9620		} else {
9621			error = bread(ump->um_devvp,
9622			    fsbtodb(fs, ino_to_fsba(fs, pino)),
9623			    (int)fs->fs_bsize, NOCRED, &bp);
9624			if (error)
9625				brelse(bp);
9626		}
9627		ACQUIRE_LOCK(ump);
9628		if (error)
9629			break;
9630		/* If the list has changed restart the loop. */
9631		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9632		nino = 0;
9633		if (idp && (idp->id_state & UNLINKNEXT))
9634			nino = idp->id_ino;
9635		if (nino != pino ||
9636		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9637			FREE_LOCK(ump);
9638			brelse(bp);
9639			ACQUIRE_LOCK(ump);
9640			continue;
9641		}
9642		nino = 0;
9643		idn = TAILQ_NEXT(inodedep, id_unlinked);
9644		if (idn)
9645			nino = idn->id_ino;
9646		/*
9647		 * Remove us from the in memory list.  After this we cannot
9648		 * access the inodedep.
9649		 */
9650		KASSERT((inodedep->id_state & UNLINKED) != 0,
9651		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9652		    inodedep));
9653		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9654		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9655		FREE_LOCK(ump);
9656		/*
9657		 * The predecessor's next pointer is manually updated here
9658		 * so that the NEXT flag is never cleared for an element
9659		 * that is in the list.
9660		 */
9661		if (pino == 0) {
9662			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9663			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9664			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9665			    bp);
9666		} else if (fs->fs_magic == FS_UFS1_MAGIC)
9667			((struct ufs1_dinode *)bp->b_data +
9668			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9669		else
9670			((struct ufs2_dinode *)bp->b_data +
9671			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9672		/*
9673		 * If the bwrite fails we have no recourse to recover.  The
9674		 * filesystem is corrupted already.
9675		 */
9676		bwrite(bp);
9677		ACQUIRE_LOCK(ump);
9678		/*
9679		 * If the superblock pointer still needs to be cleared force
9680		 * a write here.
9681		 */
9682		if (fs->fs_sujfree == ino) {
9683			FREE_LOCK(ump);
9684			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9685			    (int)fs->fs_sbsize, 0, 0, 0);
9686			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9687			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9688			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9689			    bp);
9690			bwrite(bp);
9691			ACQUIRE_LOCK(ump);
9692		}
9693
9694		if (fs->fs_sujfree != ino)
9695			return;
9696		panic("clear_unlinked_inodedep: Failed to clear free head");
9697	}
9698	if (inodedep->id_ino == fs->fs_sujfree)
9699		panic("clear_unlinked_inodedep: Freeing head of free list");
9700	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9701	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9702	return;
9703}
9704
9705/*
9706 * This workitem decrements the inode's link count.
9707 * If the link count reaches zero, the file is removed.
9708 */
9709static int
9710handle_workitem_remove(dirrem, flags)
9711	struct dirrem *dirrem;
9712	int flags;
9713{
9714	struct inodedep *inodedep;
9715	struct workhead dotdotwk;
9716	struct worklist *wk;
9717	struct ufsmount *ump;
9718	struct mount *mp;
9719	struct vnode *vp;
9720	struct inode *ip;
9721	ino_t oldinum;
9722
9723	if (dirrem->dm_state & ONWORKLIST)
9724		panic("handle_workitem_remove: dirrem %p still on worklist",
9725		    dirrem);
9726	oldinum = dirrem->dm_oldinum;
9727	mp = dirrem->dm_list.wk_mp;
9728	ump = VFSTOUFS(mp);
9729	flags |= LK_EXCLUSIVE;
9730	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
9731		return (EBUSY);
9732	ip = VTOI(vp);
9733	ACQUIRE_LOCK(ump);
9734	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
9735		panic("handle_workitem_remove: lost inodedep");
9736	if (dirrem->dm_state & ONDEPLIST)
9737		LIST_REMOVE(dirrem, dm_inonext);
9738	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
9739	    ("handle_workitem_remove:  Journal entries not written."));
9740
9741	/*
9742	 * Move all dependencies waiting on the remove to complete
9743	 * from the dirrem to the inode inowait list to be completed
9744	 * after the inode has been updated and written to disk.  Any
9745	 * marked MKDIR_PARENT are saved to be completed when the .. ref
9746	 * is removed.
9747	 */
9748	LIST_INIT(&dotdotwk);
9749	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
9750		WORKLIST_REMOVE(wk);
9751		if (wk->wk_state & MKDIR_PARENT) {
9752			wk->wk_state &= ~MKDIR_PARENT;
9753			WORKLIST_INSERT(&dotdotwk, wk);
9754			continue;
9755		}
9756		WORKLIST_INSERT(&inodedep->id_inowait, wk);
9757	}
9758	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
9759	/*
9760	 * Normal file deletion.
9761	 */
9762	if ((dirrem->dm_state & RMDIR) == 0) {
9763		ip->i_nlink--;
9764		DIP_SET(ip, i_nlink, ip->i_nlink);
9765		ip->i_flag |= IN_CHANGE;
9766		if (ip->i_nlink < ip->i_effnlink)
9767			panic("handle_workitem_remove: bad file delta");
9768		if (ip->i_nlink == 0)
9769			unlinked_inodedep(mp, inodedep);
9770		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9771		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9772		    ("handle_workitem_remove: worklist not empty. %s",
9773		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
9774		WORKITEM_FREE(dirrem, D_DIRREM);
9775		FREE_LOCK(ump);
9776		goto out;
9777	}
9778	/*
9779	 * Directory deletion. Decrement reference count for both the
9780	 * just deleted parent directory entry and the reference for ".".
9781	 * Arrange to have the reference count on the parent decremented
9782	 * to account for the loss of "..".
9783	 */
9784	ip->i_nlink -= 2;
9785	DIP_SET(ip, i_nlink, ip->i_nlink);
9786	ip->i_flag |= IN_CHANGE;
9787	if (ip->i_nlink < ip->i_effnlink)
9788		panic("handle_workitem_remove: bad dir delta");
9789	if (ip->i_nlink == 0)
9790		unlinked_inodedep(mp, inodedep);
9791	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9792	/*
9793	 * Rename a directory to a new parent. Since, we are both deleting
9794	 * and creating a new directory entry, the link count on the new
9795	 * directory should not change. Thus we skip the followup dirrem.
9796	 */
9797	if (dirrem->dm_state & DIRCHG) {
9798		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9799		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
9800		WORKITEM_FREE(dirrem, D_DIRREM);
9801		FREE_LOCK(ump);
9802		goto out;
9803	}
9804	dirrem->dm_state = ONDEPLIST;
9805	dirrem->dm_oldinum = dirrem->dm_dirinum;
9806	/*
9807	 * Place the dirrem on the parent's diremhd list.
9808	 */
9809	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
9810		panic("handle_workitem_remove: lost dir inodedep");
9811	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9812	/*
9813	 * If the allocated inode has never been written to disk, then
9814	 * the on-disk inode is zero'ed and we can remove the file
9815	 * immediately.  When journaling if the inode has been marked
9816	 * unlinked and not DEPCOMPLETE we know it can never be written.
9817	 */
9818	inodedep_lookup(mp, oldinum, 0, &inodedep);
9819	if (inodedep == NULL ||
9820	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
9821	    check_inode_unwritten(inodedep)) {
9822		FREE_LOCK(ump);
9823		vput(vp);
9824		return handle_workitem_remove(dirrem, flags);
9825	}
9826	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
9827	FREE_LOCK(ump);
9828	ip->i_flag |= IN_CHANGE;
9829out:
9830	ffs_update(vp, 0);
9831	vput(vp);
9832	return (0);
9833}
9834
9835/*
9836 * Inode de-allocation dependencies.
9837 *
9838 * When an inode's link count is reduced to zero, it can be de-allocated. We
9839 * found it convenient to postpone de-allocation until after the inode is
9840 * written to disk with its new link count (zero).  At this point, all of the
9841 * on-disk inode's block pointers are nullified and, with careful dependency
9842 * list ordering, all dependencies related to the inode will be satisfied and
9843 * the corresponding dependency structures de-allocated.  So, if/when the
9844 * inode is reused, there will be no mixing of old dependencies with new
9845 * ones.  This artificial dependency is set up by the block de-allocation
9846 * procedure above (softdep_setup_freeblocks) and completed by the
9847 * following procedure.
9848 */
9849static void
9850handle_workitem_freefile(freefile)
9851	struct freefile *freefile;
9852{
9853	struct workhead wkhd;
9854	struct fs *fs;
9855	struct inodedep *idp;
9856	struct ufsmount *ump;
9857	int error;
9858
9859	ump = VFSTOUFS(freefile->fx_list.wk_mp);
9860	fs = ump->um_fs;
9861#ifdef DEBUG
9862	ACQUIRE_LOCK(ump);
9863	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
9864	FREE_LOCK(ump);
9865	if (error)
9866		panic("handle_workitem_freefile: inodedep %p survived", idp);
9867#endif
9868	UFS_LOCK(ump);
9869	fs->fs_pendinginodes -= 1;
9870	UFS_UNLOCK(ump);
9871	LIST_INIT(&wkhd);
9872	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
9873	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
9874	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
9875		softdep_error("handle_workitem_freefile", error);
9876	ACQUIRE_LOCK(ump);
9877	WORKITEM_FREE(freefile, D_FREEFILE);
9878	FREE_LOCK(ump);
9879}
9880
9881
9882/*
9883 * Helper function which unlinks marker element from work list and returns
9884 * the next element on the list.
9885 */
9886static __inline struct worklist *
9887markernext(struct worklist *marker)
9888{
9889	struct worklist *next;
9890
9891	next = LIST_NEXT(marker, wk_list);
9892	LIST_REMOVE(marker, wk_list);
9893	return next;
9894}
9895
9896/*
9897 * Disk writes.
9898 *
9899 * The dependency structures constructed above are most actively used when file
9900 * system blocks are written to disk.  No constraints are placed on when a
9901 * block can be written, but unsatisfied update dependencies are made safe by
9902 * modifying (or replacing) the source memory for the duration of the disk
9903 * write.  When the disk write completes, the memory block is again brought
9904 * up-to-date.
9905 *
9906 * In-core inode structure reclamation.
9907 *
9908 * Because there are a finite number of "in-core" inode structures, they are
9909 * reused regularly.  By transferring all inode-related dependencies to the
9910 * in-memory inode block and indexing them separately (via "inodedep"s), we
9911 * can allow "in-core" inode structures to be reused at any time and avoid
9912 * any increase in contention.
9913 *
9914 * Called just before entering the device driver to initiate a new disk I/O.
9915 * The buffer must be locked, thus, no I/O completion operations can occur
9916 * while we are manipulating its associated dependencies.
9917 */
9918static void
9919softdep_disk_io_initiation(bp)
9920	struct buf *bp;		/* structure describing disk write to occur */
9921{
9922	struct worklist *wk;
9923	struct worklist marker;
9924	struct inodedep *inodedep;
9925	struct freeblks *freeblks;
9926	struct jblkdep *jblkdep;
9927	struct newblk *newblk;
9928	struct ufsmount *ump;
9929
9930	/*
9931	 * We only care about write operations. There should never
9932	 * be dependencies for reads.
9933	 */
9934	if (bp->b_iocmd != BIO_WRITE)
9935		panic("softdep_disk_io_initiation: not write");
9936
9937	if (bp->b_vflags & BV_BKGRDINPROG)
9938		panic("softdep_disk_io_initiation: Writing buffer with "
9939		    "background write in progress: %p", bp);
9940
9941	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
9942		return;
9943	ump = VFSTOUFS(wk->wk_mp);
9944
9945	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
9946	PHOLD(curproc);			/* Don't swap out kernel stack */
9947	ACQUIRE_LOCK(ump);
9948	/*
9949	 * Do any necessary pre-I/O processing.
9950	 */
9951	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
9952	     wk = markernext(&marker)) {
9953		LIST_INSERT_AFTER(wk, &marker, wk_list);
9954		switch (wk->wk_type) {
9955
9956		case D_PAGEDEP:
9957			initiate_write_filepage(WK_PAGEDEP(wk), bp);
9958			continue;
9959
9960		case D_INODEDEP:
9961			inodedep = WK_INODEDEP(wk);
9962			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
9963				initiate_write_inodeblock_ufs1(inodedep, bp);
9964			else
9965				initiate_write_inodeblock_ufs2(inodedep, bp);
9966			continue;
9967
9968		case D_INDIRDEP:
9969			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
9970			continue;
9971
9972		case D_BMSAFEMAP:
9973			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
9974			continue;
9975
9976		case D_JSEG:
9977			WK_JSEG(wk)->js_buf = NULL;
9978			continue;
9979
9980		case D_FREEBLKS:
9981			freeblks = WK_FREEBLKS(wk);
9982			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
9983			/*
9984			 * We have to wait for the freeblks to be journaled
9985			 * before we can write an inodeblock with updated
9986			 * pointers.  Be careful to arrange the marker so
9987			 * we revisit the freeblks if it's not removed by
9988			 * the first jwait().
9989			 */
9990			if (jblkdep != NULL) {
9991				LIST_REMOVE(&marker, wk_list);
9992				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9993				jwait(&jblkdep->jb_list, MNT_WAIT);
9994			}
9995			continue;
9996		case D_ALLOCDIRECT:
9997		case D_ALLOCINDIR:
9998			/*
9999			 * We have to wait for the jnewblk to be journaled
10000			 * before we can write to a block if the contents
10001			 * may be confused with an earlier file's indirect
10002			 * at recovery time.  Handle the marker as described
10003			 * above.
10004			 */
10005			newblk = WK_NEWBLK(wk);
10006			if (newblk->nb_jnewblk != NULL &&
10007			    indirblk_lookup(newblk->nb_list.wk_mp,
10008			    newblk->nb_newblkno)) {
10009				LIST_REMOVE(&marker, wk_list);
10010				LIST_INSERT_BEFORE(wk, &marker, wk_list);
10011				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
10012			}
10013			continue;
10014
10015		case D_SBDEP:
10016			initiate_write_sbdep(WK_SBDEP(wk));
10017			continue;
10018
10019		case D_MKDIR:
10020		case D_FREEWORK:
10021		case D_FREEDEP:
10022		case D_JSEGDEP:
10023			continue;
10024
10025		default:
10026			panic("handle_disk_io_initiation: Unexpected type %s",
10027			    TYPENAME(wk->wk_type));
10028			/* NOTREACHED */
10029		}
10030	}
10031	FREE_LOCK(ump);
10032	PRELE(curproc);			/* Allow swapout of kernel stack */
10033}
10034
10035/*
10036 * Called from within the procedure above to deal with unsatisfied
10037 * allocation dependencies in a directory. The buffer must be locked,
10038 * thus, no I/O completion operations can occur while we are
10039 * manipulating its associated dependencies.
10040 */
10041static void
10042initiate_write_filepage(pagedep, bp)
10043	struct pagedep *pagedep;
10044	struct buf *bp;
10045{
10046	struct jremref *jremref;
10047	struct jmvref *jmvref;
10048	struct dirrem *dirrem;
10049	struct diradd *dap;
10050	struct direct *ep;
10051	int i;
10052
10053	if (pagedep->pd_state & IOSTARTED) {
10054		/*
10055		 * This can only happen if there is a driver that does not
10056		 * understand chaining. Here biodone will reissue the call
10057		 * to strategy for the incomplete buffers.
10058		 */
10059		printf("initiate_write_filepage: already started\n");
10060		return;
10061	}
10062	pagedep->pd_state |= IOSTARTED;
10063	/*
10064	 * Wait for all journal remove dependencies to hit the disk.
10065	 * We can not allow any potentially conflicting directory adds
10066	 * to be visible before removes and rollback is too difficult.
10067	 * The per-filesystem lock may be dropped and re-acquired, however
10068	 * we hold the buf locked so the dependency can not go away.
10069	 */
10070	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
10071		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
10072			jwait(&jremref->jr_list, MNT_WAIT);
10073	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
10074		jwait(&jmvref->jm_list, MNT_WAIT);
10075	for (i = 0; i < DAHASHSZ; i++) {
10076		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
10077			ep = (struct direct *)
10078			    ((char *)bp->b_data + dap->da_offset);
10079			if (ep->d_ino != dap->da_newinum)
10080				panic("%s: dir inum %ju != new %ju",
10081				    "initiate_write_filepage",
10082				    (uintmax_t)ep->d_ino,
10083				    (uintmax_t)dap->da_newinum);
10084			if (dap->da_state & DIRCHG)
10085				ep->d_ino = dap->da_previous->dm_oldinum;
10086			else
10087				ep->d_ino = 0;
10088			dap->da_state &= ~ATTACHED;
10089			dap->da_state |= UNDONE;
10090		}
10091	}
10092}
10093
10094/*
10095 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
10096 * Note that any bug fixes made to this routine must be done in the
10097 * version found below.
10098 *
10099 * Called from within the procedure above to deal with unsatisfied
10100 * allocation dependencies in an inodeblock. The buffer must be
10101 * locked, thus, no I/O completion operations can occur while we
10102 * are manipulating its associated dependencies.
10103 */
10104static void
10105initiate_write_inodeblock_ufs1(inodedep, bp)
10106	struct inodedep *inodedep;
10107	struct buf *bp;			/* The inode block */
10108{
10109	struct allocdirect *adp, *lastadp;
10110	struct ufs1_dinode *dp;
10111	struct ufs1_dinode *sip;
10112	struct inoref *inoref;
10113	struct ufsmount *ump;
10114	struct fs *fs;
10115	ufs_lbn_t i;
10116#ifdef INVARIANTS
10117	ufs_lbn_t prevlbn = 0;
10118#endif
10119	int deplist;
10120
10121	if (inodedep->id_state & IOSTARTED)
10122		panic("initiate_write_inodeblock_ufs1: already started");
10123	inodedep->id_state |= IOSTARTED;
10124	fs = inodedep->id_fs;
10125	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10126	LOCK_OWNED(ump);
10127	dp = (struct ufs1_dinode *)bp->b_data +
10128	    ino_to_fsbo(fs, inodedep->id_ino);
10129
10130	/*
10131	 * If we're on the unlinked list but have not yet written our
10132	 * next pointer initialize it here.
10133	 */
10134	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10135		struct inodedep *inon;
10136
10137		inon = TAILQ_NEXT(inodedep, id_unlinked);
10138		dp->di_freelink = inon ? inon->id_ino : 0;
10139	}
10140	/*
10141	 * If the bitmap is not yet written, then the allocated
10142	 * inode cannot be written to disk.
10143	 */
10144	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10145		if (inodedep->id_savedino1 != NULL)
10146			panic("initiate_write_inodeblock_ufs1: I/O underway");
10147		FREE_LOCK(ump);
10148		sip = malloc(sizeof(struct ufs1_dinode),
10149		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10150		ACQUIRE_LOCK(ump);
10151		inodedep->id_savedino1 = sip;
10152		*inodedep->id_savedino1 = *dp;
10153		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
10154		dp->di_gen = inodedep->id_savedino1->di_gen;
10155		dp->di_freelink = inodedep->id_savedino1->di_freelink;
10156		return;
10157	}
10158	/*
10159	 * If no dependencies, then there is nothing to roll back.
10160	 */
10161	inodedep->id_savedsize = dp->di_size;
10162	inodedep->id_savedextsize = 0;
10163	inodedep->id_savednlink = dp->di_nlink;
10164	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10165	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10166		return;
10167	/*
10168	 * Revert the link count to that of the first unwritten journal entry.
10169	 */
10170	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10171	if (inoref)
10172		dp->di_nlink = inoref->if_nlink;
10173	/*
10174	 * Set the dependencies to busy.
10175	 */
10176	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10177	     adp = TAILQ_NEXT(adp, ad_next)) {
10178#ifdef INVARIANTS
10179		if (deplist != 0 && prevlbn >= adp->ad_offset)
10180			panic("softdep_write_inodeblock: lbn order");
10181		prevlbn = adp->ad_offset;
10182		if (adp->ad_offset < NDADDR &&
10183		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10184			panic("%s: direct pointer #%jd mismatch %d != %jd",
10185			    "softdep_write_inodeblock",
10186			    (intmax_t)adp->ad_offset,
10187			    dp->di_db[adp->ad_offset],
10188			    (intmax_t)adp->ad_newblkno);
10189		if (adp->ad_offset >= NDADDR &&
10190		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10191			panic("%s: indirect pointer #%jd mismatch %d != %jd",
10192			    "softdep_write_inodeblock",
10193			    (intmax_t)adp->ad_offset - NDADDR,
10194			    dp->di_ib[adp->ad_offset - NDADDR],
10195			    (intmax_t)adp->ad_newblkno);
10196		deplist |= 1 << adp->ad_offset;
10197		if ((adp->ad_state & ATTACHED) == 0)
10198			panic("softdep_write_inodeblock: Unknown state 0x%x",
10199			    adp->ad_state);
10200#endif /* INVARIANTS */
10201		adp->ad_state &= ~ATTACHED;
10202		adp->ad_state |= UNDONE;
10203	}
10204	/*
10205	 * The on-disk inode cannot claim to be any larger than the last
10206	 * fragment that has been written. Otherwise, the on-disk inode
10207	 * might have fragments that were not the last block in the file
10208	 * which would corrupt the filesystem.
10209	 */
10210	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10211	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10212		if (adp->ad_offset >= NDADDR)
10213			break;
10214		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10215		/* keep going until hitting a rollback to a frag */
10216		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10217			continue;
10218		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10219		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10220#ifdef INVARIANTS
10221			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10222				panic("softdep_write_inodeblock: lost dep1");
10223#endif /* INVARIANTS */
10224			dp->di_db[i] = 0;
10225		}
10226		for (i = 0; i < NIADDR; i++) {
10227#ifdef INVARIANTS
10228			if (dp->di_ib[i] != 0 &&
10229			    (deplist & ((1 << NDADDR) << i)) == 0)
10230				panic("softdep_write_inodeblock: lost dep2");
10231#endif /* INVARIANTS */
10232			dp->di_ib[i] = 0;
10233		}
10234		return;
10235	}
10236	/*
10237	 * If we have zero'ed out the last allocated block of the file,
10238	 * roll back the size to the last currently allocated block.
10239	 * We know that this last allocated block is a full-sized as
10240	 * we already checked for fragments in the loop above.
10241	 */
10242	if (lastadp != NULL &&
10243	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10244		for (i = lastadp->ad_offset; i >= 0; i--)
10245			if (dp->di_db[i] != 0)
10246				break;
10247		dp->di_size = (i + 1) * fs->fs_bsize;
10248	}
10249	/*
10250	 * The only dependencies are for indirect blocks.
10251	 *
10252	 * The file size for indirect block additions is not guaranteed.
10253	 * Such a guarantee would be non-trivial to achieve. The conventional
10254	 * synchronous write implementation also does not make this guarantee.
10255	 * Fsck should catch and fix discrepancies. Arguably, the file size
10256	 * can be over-estimated without destroying integrity when the file
10257	 * moves into the indirect blocks (i.e., is large). If we want to
10258	 * postpone fsck, we are stuck with this argument.
10259	 */
10260	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10261		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10262}
10263
10264/*
10265 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10266 * Note that any bug fixes made to this routine must be done in the
10267 * version found above.
10268 *
10269 * Called from within the procedure above to deal with unsatisfied
10270 * allocation dependencies in an inodeblock. The buffer must be
10271 * locked, thus, no I/O completion operations can occur while we
10272 * are manipulating its associated dependencies.
10273 */
10274static void
10275initiate_write_inodeblock_ufs2(inodedep, bp)
10276	struct inodedep *inodedep;
10277	struct buf *bp;			/* The inode block */
10278{
10279	struct allocdirect *adp, *lastadp;
10280	struct ufs2_dinode *dp;
10281	struct ufs2_dinode *sip;
10282	struct inoref *inoref;
10283	struct ufsmount *ump;
10284	struct fs *fs;
10285	ufs_lbn_t i;
10286#ifdef INVARIANTS
10287	ufs_lbn_t prevlbn = 0;
10288#endif
10289	int deplist;
10290
10291	if (inodedep->id_state & IOSTARTED)
10292		panic("initiate_write_inodeblock_ufs2: already started");
10293	inodedep->id_state |= IOSTARTED;
10294	fs = inodedep->id_fs;
10295	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10296	LOCK_OWNED(ump);
10297	dp = (struct ufs2_dinode *)bp->b_data +
10298	    ino_to_fsbo(fs, inodedep->id_ino);
10299
10300	/*
10301	 * If we're on the unlinked list but have not yet written our
10302	 * next pointer initialize it here.
10303	 */
10304	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10305		struct inodedep *inon;
10306
10307		inon = TAILQ_NEXT(inodedep, id_unlinked);
10308		dp->di_freelink = inon ? inon->id_ino : 0;
10309	}
10310	/*
10311	 * If the bitmap is not yet written, then the allocated
10312	 * inode cannot be written to disk.
10313	 */
10314	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10315		if (inodedep->id_savedino2 != NULL)
10316			panic("initiate_write_inodeblock_ufs2: I/O underway");
10317		FREE_LOCK(ump);
10318		sip = malloc(sizeof(struct ufs2_dinode),
10319		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10320		ACQUIRE_LOCK(ump);
10321		inodedep->id_savedino2 = sip;
10322		*inodedep->id_savedino2 = *dp;
10323		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10324		dp->di_gen = inodedep->id_savedino2->di_gen;
10325		dp->di_freelink = inodedep->id_savedino2->di_freelink;
10326		return;
10327	}
10328	/*
10329	 * If no dependencies, then there is nothing to roll back.
10330	 */
10331	inodedep->id_savedsize = dp->di_size;
10332	inodedep->id_savedextsize = dp->di_extsize;
10333	inodedep->id_savednlink = dp->di_nlink;
10334	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10335	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
10336	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10337		return;
10338	/*
10339	 * Revert the link count to that of the first unwritten journal entry.
10340	 */
10341	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10342	if (inoref)
10343		dp->di_nlink = inoref->if_nlink;
10344
10345	/*
10346	 * Set the ext data dependencies to busy.
10347	 */
10348	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10349	     adp = TAILQ_NEXT(adp, ad_next)) {
10350#ifdef INVARIANTS
10351		if (deplist != 0 && prevlbn >= adp->ad_offset)
10352			panic("softdep_write_inodeblock: lbn order");
10353		prevlbn = adp->ad_offset;
10354		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10355			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10356			    "softdep_write_inodeblock",
10357			    (intmax_t)adp->ad_offset,
10358			    (intmax_t)dp->di_extb[adp->ad_offset],
10359			    (intmax_t)adp->ad_newblkno);
10360		deplist |= 1 << adp->ad_offset;
10361		if ((adp->ad_state & ATTACHED) == 0)
10362			panic("softdep_write_inodeblock: Unknown state 0x%x",
10363			    adp->ad_state);
10364#endif /* INVARIANTS */
10365		adp->ad_state &= ~ATTACHED;
10366		adp->ad_state |= UNDONE;
10367	}
10368	/*
10369	 * The on-disk inode cannot claim to be any larger than the last
10370	 * fragment that has been written. Otherwise, the on-disk inode
10371	 * might have fragments that were not the last block in the ext
10372	 * data which would corrupt the filesystem.
10373	 */
10374	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10375	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10376		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10377		/* keep going until hitting a rollback to a frag */
10378		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10379			continue;
10380		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10381		for (i = adp->ad_offset + 1; i < NXADDR; i++) {
10382#ifdef INVARIANTS
10383			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10384				panic("softdep_write_inodeblock: lost dep1");
10385#endif /* INVARIANTS */
10386			dp->di_extb[i] = 0;
10387		}
10388		lastadp = NULL;
10389		break;
10390	}
10391	/*
10392	 * If we have zero'ed out the last allocated block of the ext
10393	 * data, roll back the size to the last currently allocated block.
10394	 * We know that this last allocated block is a full-sized as
10395	 * we already checked for fragments in the loop above.
10396	 */
10397	if (lastadp != NULL &&
10398	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10399		for (i = lastadp->ad_offset; i >= 0; i--)
10400			if (dp->di_extb[i] != 0)
10401				break;
10402		dp->di_extsize = (i + 1) * fs->fs_bsize;
10403	}
10404	/*
10405	 * Set the file data dependencies to busy.
10406	 */
10407	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10408	     adp = TAILQ_NEXT(adp, ad_next)) {
10409#ifdef INVARIANTS
10410		if (deplist != 0 && prevlbn >= adp->ad_offset)
10411			panic("softdep_write_inodeblock: lbn order");
10412		if ((adp->ad_state & ATTACHED) == 0)
10413			panic("inodedep %p and adp %p not attached", inodedep, adp);
10414		prevlbn = adp->ad_offset;
10415		if (adp->ad_offset < NDADDR &&
10416		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10417			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10418			    "softdep_write_inodeblock",
10419			    (intmax_t)adp->ad_offset,
10420			    (intmax_t)dp->di_db[adp->ad_offset],
10421			    (intmax_t)adp->ad_newblkno);
10422		if (adp->ad_offset >= NDADDR &&
10423		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10424			panic("%s indirect pointer #%jd mismatch %jd != %jd",
10425			    "softdep_write_inodeblock:",
10426			    (intmax_t)adp->ad_offset - NDADDR,
10427			    (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
10428			    (intmax_t)adp->ad_newblkno);
10429		deplist |= 1 << adp->ad_offset;
10430		if ((adp->ad_state & ATTACHED) == 0)
10431			panic("softdep_write_inodeblock: Unknown state 0x%x",
10432			    adp->ad_state);
10433#endif /* INVARIANTS */
10434		adp->ad_state &= ~ATTACHED;
10435		adp->ad_state |= UNDONE;
10436	}
10437	/*
10438	 * The on-disk inode cannot claim to be any larger than the last
10439	 * fragment that has been written. Otherwise, the on-disk inode
10440	 * might have fragments that were not the last block in the file
10441	 * which would corrupt the filesystem.
10442	 */
10443	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10444	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10445		if (adp->ad_offset >= NDADDR)
10446			break;
10447		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10448		/* keep going until hitting a rollback to a frag */
10449		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10450			continue;
10451		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10452		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10453#ifdef INVARIANTS
10454			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10455				panic("softdep_write_inodeblock: lost dep2");
10456#endif /* INVARIANTS */
10457			dp->di_db[i] = 0;
10458		}
10459		for (i = 0; i < NIADDR; i++) {
10460#ifdef INVARIANTS
10461			if (dp->di_ib[i] != 0 &&
10462			    (deplist & ((1 << NDADDR) << i)) == 0)
10463				panic("softdep_write_inodeblock: lost dep3");
10464#endif /* INVARIANTS */
10465			dp->di_ib[i] = 0;
10466		}
10467		return;
10468	}
10469	/*
10470	 * If we have zero'ed out the last allocated block of the file,
10471	 * roll back the size to the last currently allocated block.
10472	 * We know that this last allocated block is a full-sized as
10473	 * we already checked for fragments in the loop above.
10474	 */
10475	if (lastadp != NULL &&
10476	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10477		for (i = lastadp->ad_offset; i >= 0; i--)
10478			if (dp->di_db[i] != 0)
10479				break;
10480		dp->di_size = (i + 1) * fs->fs_bsize;
10481	}
10482	/*
10483	 * The only dependencies are for indirect blocks.
10484	 *
10485	 * The file size for indirect block additions is not guaranteed.
10486	 * Such a guarantee would be non-trivial to achieve. The conventional
10487	 * synchronous write implementation also does not make this guarantee.
10488	 * Fsck should catch and fix discrepancies. Arguably, the file size
10489	 * can be over-estimated without destroying integrity when the file
10490	 * moves into the indirect blocks (i.e., is large). If we want to
10491	 * postpone fsck, we are stuck with this argument.
10492	 */
10493	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10494		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10495}
10496
10497/*
10498 * Cancel an indirdep as a result of truncation.  Release all of the
10499 * children allocindirs and place their journal work on the appropriate
10500 * list.
10501 */
10502static void
10503cancel_indirdep(indirdep, bp, freeblks)
10504	struct indirdep *indirdep;
10505	struct buf *bp;
10506	struct freeblks *freeblks;
10507{
10508	struct allocindir *aip;
10509
10510	/*
10511	 * None of the indirect pointers will ever be visible,
10512	 * so they can simply be tossed. GOINGAWAY ensures
10513	 * that allocated pointers will be saved in the buffer
10514	 * cache until they are freed. Note that they will
10515	 * only be able to be found by their physical address
10516	 * since the inode mapping the logical address will
10517	 * be gone. The save buffer used for the safe copy
10518	 * was allocated in setup_allocindir_phase2 using
10519	 * the physical address so it could be used for this
10520	 * purpose. Hence we swap the safe copy with the real
10521	 * copy, allowing the safe copy to be freed and holding
10522	 * on to the real copy for later use in indir_trunc.
10523	 */
10524	if (indirdep->ir_state & GOINGAWAY)
10525		panic("cancel_indirdep: already gone");
10526	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10527		indirdep->ir_state |= DEPCOMPLETE;
10528		LIST_REMOVE(indirdep, ir_next);
10529	}
10530	indirdep->ir_state |= GOINGAWAY;
10531	/*
10532	 * Pass in bp for blocks still have journal writes
10533	 * pending so we can cancel them on their own.
10534	 */
10535	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
10536		cancel_allocindir(aip, bp, freeblks, 0);
10537	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
10538		cancel_allocindir(aip, NULL, freeblks, 0);
10539	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
10540		cancel_allocindir(aip, NULL, freeblks, 0);
10541	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
10542		cancel_allocindir(aip, NULL, freeblks, 0);
10543	/*
10544	 * If there are pending partial truncations we need to keep the
10545	 * old block copy around until they complete.  This is because
10546	 * the current b_data is not a perfect superset of the available
10547	 * blocks.
10548	 */
10549	if (TAILQ_EMPTY(&indirdep->ir_trunc))
10550		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10551	else
10552		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10553	WORKLIST_REMOVE(&indirdep->ir_list);
10554	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10555	indirdep->ir_bp = NULL;
10556	indirdep->ir_freeblks = freeblks;
10557}
10558
10559/*
10560 * Free an indirdep once it no longer has new pointers to track.
10561 */
10562static void
10563free_indirdep(indirdep)
10564	struct indirdep *indirdep;
10565{
10566
10567	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10568	    ("free_indirdep: Indir trunc list not empty."));
10569	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10570	    ("free_indirdep: Complete head not empty."));
10571	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10572	    ("free_indirdep: write head not empty."));
10573	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10574	    ("free_indirdep: done head not empty."));
10575	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10576	    ("free_indirdep: deplist head not empty."));
10577	KASSERT((indirdep->ir_state & DEPCOMPLETE),
10578	    ("free_indirdep: %p still on newblk list.", indirdep));
10579	KASSERT(indirdep->ir_saveddata == NULL,
10580	    ("free_indirdep: %p still has saved data.", indirdep));
10581	if (indirdep->ir_state & ONWORKLIST)
10582		WORKLIST_REMOVE(&indirdep->ir_list);
10583	WORKITEM_FREE(indirdep, D_INDIRDEP);
10584}
10585
10586/*
10587 * Called before a write to an indirdep.  This routine is responsible for
10588 * rolling back pointers to a safe state which includes only those
10589 * allocindirs which have been completed.
10590 */
10591static void
10592initiate_write_indirdep(indirdep, bp)
10593	struct indirdep *indirdep;
10594	struct buf *bp;
10595{
10596	struct ufsmount *ump;
10597
10598	indirdep->ir_state |= IOSTARTED;
10599	if (indirdep->ir_state & GOINGAWAY)
10600		panic("disk_io_initiation: indirdep gone");
10601	/*
10602	 * If there are no remaining dependencies, this will be writing
10603	 * the real pointers.
10604	 */
10605	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10606	    TAILQ_EMPTY(&indirdep->ir_trunc))
10607		return;
10608	/*
10609	 * Replace up-to-date version with safe version.
10610	 */
10611	if (indirdep->ir_saveddata == NULL) {
10612		ump = VFSTOUFS(indirdep->ir_list.wk_mp);
10613		LOCK_OWNED(ump);
10614		FREE_LOCK(ump);
10615		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10616		    M_SOFTDEP_FLAGS);
10617		ACQUIRE_LOCK(ump);
10618	}
10619	indirdep->ir_state &= ~ATTACHED;
10620	indirdep->ir_state |= UNDONE;
10621	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10622	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10623	    bp->b_bcount);
10624}
10625
10626/*
10627 * Called when an inode has been cleared in a cg bitmap.  This finally
10628 * eliminates any canceled jaddrefs
10629 */
10630void
10631softdep_setup_inofree(mp, bp, ino, wkhd)
10632	struct mount *mp;
10633	struct buf *bp;
10634	ino_t ino;
10635	struct workhead *wkhd;
10636{
10637	struct worklist *wk, *wkn;
10638	struct inodedep *inodedep;
10639	struct ufsmount *ump;
10640	uint8_t *inosused;
10641	struct cg *cgp;
10642	struct fs *fs;
10643
10644	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
10645	    ("softdep_setup_inofree called on non-softdep filesystem"));
10646	ump = VFSTOUFS(mp);
10647	ACQUIRE_LOCK(ump);
10648	fs = ump->um_fs;
10649	cgp = (struct cg *)bp->b_data;
10650	inosused = cg_inosused(cgp);
10651	if (isset(inosused, ino % fs->fs_ipg))
10652		panic("softdep_setup_inofree: inode %ju not freed.",
10653		    (uintmax_t)ino);
10654	if (inodedep_lookup(mp, ino, 0, &inodedep))
10655		panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
10656		    (uintmax_t)ino, inodedep);
10657	if (wkhd) {
10658		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10659			if (wk->wk_type != D_JADDREF)
10660				continue;
10661			WORKLIST_REMOVE(wk);
10662			/*
10663			 * We can free immediately even if the jaddref
10664			 * isn't attached in a background write as now
10665			 * the bitmaps are reconciled.
10666			 */
10667			wk->wk_state |= COMPLETE | ATTACHED;
10668			free_jaddref(WK_JADDREF(wk));
10669		}
10670		jwork_move(&bp->b_dep, wkhd);
10671	}
10672	FREE_LOCK(ump);
10673}
10674
10675
10676/*
10677 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10678 * map.  Any dependencies waiting for the write to clear are added to the
10679 * buf's list and any jnewblks that are being canceled are discarded
10680 * immediately.
10681 */
10682void
10683softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
10684	struct mount *mp;
10685	struct buf *bp;
10686	ufs2_daddr_t blkno;
10687	int frags;
10688	struct workhead *wkhd;
10689{
10690	struct bmsafemap *bmsafemap;
10691	struct jnewblk *jnewblk;
10692	struct ufsmount *ump;
10693	struct worklist *wk;
10694	struct fs *fs;
10695#ifdef SUJ_DEBUG
10696	uint8_t *blksfree;
10697	struct cg *cgp;
10698	ufs2_daddr_t jstart;
10699	ufs2_daddr_t jend;
10700	ufs2_daddr_t end;
10701	long bno;
10702	int i;
10703#endif
10704
10705	CTR3(KTR_SUJ,
10706	    "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10707	    blkno, frags, wkhd);
10708
10709	ump = VFSTOUFS(mp);
10710	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
10711	    ("softdep_setup_blkfree called on non-softdep filesystem"));
10712	ACQUIRE_LOCK(ump);
10713	/* Lookup the bmsafemap so we track when it is dirty. */
10714	fs = ump->um_fs;
10715	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10716	/*
10717	 * Detach any jnewblks which have been canceled.  They must linger
10718	 * until the bitmap is cleared again by ffs_blkfree() to prevent
10719	 * an unjournaled allocation from hitting the disk.
10720	 */
10721	if (wkhd) {
10722		while ((wk = LIST_FIRST(wkhd)) != NULL) {
10723			CTR2(KTR_SUJ,
10724			    "softdep_setup_blkfree: blkno %jd wk type %d",
10725			    blkno, wk->wk_type);
10726			WORKLIST_REMOVE(wk);
10727			if (wk->wk_type != D_JNEWBLK) {
10728				WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
10729				continue;
10730			}
10731			jnewblk = WK_JNEWBLK(wk);
10732			KASSERT(jnewblk->jn_state & GOINGAWAY,
10733			    ("softdep_setup_blkfree: jnewblk not canceled."));
10734#ifdef SUJ_DEBUG
10735			/*
10736			 * Assert that this block is free in the bitmap
10737			 * before we discard the jnewblk.
10738			 */
10739			cgp = (struct cg *)bp->b_data;
10740			blksfree = cg_blksfree(cgp);
10741			bno = dtogd(fs, jnewblk->jn_blkno);
10742			for (i = jnewblk->jn_oldfrags;
10743			    i < jnewblk->jn_frags; i++) {
10744				if (isset(blksfree, bno + i))
10745					continue;
10746				panic("softdep_setup_blkfree: not free");
10747			}
10748#endif
10749			/*
10750			 * Even if it's not attached we can free immediately
10751			 * as the new bitmap is correct.
10752			 */
10753			wk->wk_state |= COMPLETE | ATTACHED;
10754			free_jnewblk(jnewblk);
10755		}
10756	}
10757
10758#ifdef SUJ_DEBUG
10759	/*
10760	 * Assert that we are not freeing a block which has an outstanding
10761	 * allocation dependency.
10762	 */
10763	fs = VFSTOUFS(mp)->um_fs;
10764	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10765	end = blkno + frags;
10766	LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10767		/*
10768		 * Don't match against blocks that will be freed when the
10769		 * background write is done.
10770		 */
10771		if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
10772		    (COMPLETE | DEPCOMPLETE))
10773			continue;
10774		jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
10775		jend = jnewblk->jn_blkno + jnewblk->jn_frags;
10776		if ((blkno >= jstart && blkno < jend) ||
10777		    (end > jstart && end <= jend)) {
10778			printf("state 0x%X %jd - %d %d dep %p\n",
10779			    jnewblk->jn_state, jnewblk->jn_blkno,
10780			    jnewblk->jn_oldfrags, jnewblk->jn_frags,
10781			    jnewblk->jn_dep);
10782			panic("softdep_setup_blkfree: "
10783			    "%jd-%jd(%d) overlaps with %jd-%jd",
10784			    blkno, end, frags, jstart, jend);
10785		}
10786	}
10787#endif
10788	FREE_LOCK(ump);
10789}
10790
10791/*
10792 * Revert a block allocation when the journal record that describes it
10793 * is not yet written.
10794 */
10795static int
10796jnewblk_rollback(jnewblk, fs, cgp, blksfree)
10797	struct jnewblk *jnewblk;
10798	struct fs *fs;
10799	struct cg *cgp;
10800	uint8_t *blksfree;
10801{
10802	ufs1_daddr_t fragno;
10803	long cgbno, bbase;
10804	int frags, blk;
10805	int i;
10806
10807	frags = 0;
10808	cgbno = dtogd(fs, jnewblk->jn_blkno);
10809	/*
10810	 * We have to test which frags need to be rolled back.  We may
10811	 * be operating on a stale copy when doing background writes.
10812	 */
10813	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
10814		if (isclr(blksfree, cgbno + i))
10815			frags++;
10816	if (frags == 0)
10817		return (0);
10818	/*
10819	 * This is mostly ffs_blkfree() sans some validation and
10820	 * superblock updates.
10821	 */
10822	if (frags == fs->fs_frag) {
10823		fragno = fragstoblks(fs, cgbno);
10824		ffs_setblock(fs, blksfree, fragno);
10825		ffs_clusteracct(fs, cgp, fragno, 1);
10826		cgp->cg_cs.cs_nbfree++;
10827	} else {
10828		cgbno += jnewblk->jn_oldfrags;
10829		bbase = cgbno - fragnum(fs, cgbno);
10830		/* Decrement the old frags.  */
10831		blk = blkmap(fs, blksfree, bbase);
10832		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
10833		/* Deallocate the fragment */
10834		for (i = 0; i < frags; i++)
10835			setbit(blksfree, cgbno + i);
10836		cgp->cg_cs.cs_nffree += frags;
10837		/* Add back in counts associated with the new frags */
10838		blk = blkmap(fs, blksfree, bbase);
10839		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
10840		/* If a complete block has been reassembled, account for it. */
10841		fragno = fragstoblks(fs, bbase);
10842		if (ffs_isblock(fs, blksfree, fragno)) {
10843			cgp->cg_cs.cs_nffree -= fs->fs_frag;
10844			ffs_clusteracct(fs, cgp, fragno, 1);
10845			cgp->cg_cs.cs_nbfree++;
10846		}
10847	}
10848	stat_jnewblk++;
10849	jnewblk->jn_state &= ~ATTACHED;
10850	jnewblk->jn_state |= UNDONE;
10851
10852	return (frags);
10853}
10854
10855static void
10856initiate_write_bmsafemap(bmsafemap, bp)
10857	struct bmsafemap *bmsafemap;
10858	struct buf *bp;			/* The cg block. */
10859{
10860	struct jaddref *jaddref;
10861	struct jnewblk *jnewblk;
10862	uint8_t *inosused;
10863	uint8_t *blksfree;
10864	struct cg *cgp;
10865	struct fs *fs;
10866	ino_t ino;
10867
10868	if (bmsafemap->sm_state & IOSTARTED)
10869		return;
10870	bmsafemap->sm_state |= IOSTARTED;
10871	/*
10872	 * Clear any inode allocations which are pending journal writes.
10873	 */
10874	if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
10875		cgp = (struct cg *)bp->b_data;
10876		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10877		inosused = cg_inosused(cgp);
10878		LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
10879			ino = jaddref->ja_ino % fs->fs_ipg;
10880			if (isset(inosused, ino)) {
10881				if ((jaddref->ja_mode & IFMT) == IFDIR)
10882					cgp->cg_cs.cs_ndir--;
10883				cgp->cg_cs.cs_nifree++;
10884				clrbit(inosused, ino);
10885				jaddref->ja_state &= ~ATTACHED;
10886				jaddref->ja_state |= UNDONE;
10887				stat_jaddref++;
10888			} else
10889				panic("initiate_write_bmsafemap: inode %ju "
10890				    "marked free", (uintmax_t)jaddref->ja_ino);
10891		}
10892	}
10893	/*
10894	 * Clear any block allocations which are pending journal writes.
10895	 */
10896	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
10897		cgp = (struct cg *)bp->b_data;
10898		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10899		blksfree = cg_blksfree(cgp);
10900		LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10901			if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
10902				continue;
10903			panic("initiate_write_bmsafemap: block %jd "
10904			    "marked free", jnewblk->jn_blkno);
10905		}
10906	}
10907	/*
10908	 * Move allocation lists to the written lists so they can be
10909	 * cleared once the block write is complete.
10910	 */
10911	LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
10912	    inodedep, id_deps);
10913	LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
10914	    newblk, nb_deps);
10915	LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
10916	    wk_list);
10917}
10918
10919/*
10920 * This routine is called during the completion interrupt
10921 * service routine for a disk write (from the procedure called
10922 * by the device driver to inform the filesystem caches of
10923 * a request completion).  It should be called early in this
10924 * procedure, before the block is made available to other
10925 * processes or other routines are called.
10926 *
10927 */
10928static void
10929softdep_disk_write_complete(bp)
10930	struct buf *bp;		/* describes the completed disk write */
10931{
10932	struct worklist *wk;
10933	struct worklist *owk;
10934	struct ufsmount *ump;
10935	struct workhead reattach;
10936	struct freeblks *freeblks;
10937	struct buf *sbp;
10938
10939	/*
10940	 * If an error occurred while doing the write, then the data
10941	 * has not hit the disk and the dependencies cannot be unrolled.
10942	 */
10943	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
10944		return;
10945	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
10946		return;
10947	ump = VFSTOUFS(wk->wk_mp);
10948	LIST_INIT(&reattach);
10949	/*
10950	 * This lock must not be released anywhere in this code segment.
10951	 */
10952	sbp = NULL;
10953	owk = NULL;
10954	ACQUIRE_LOCK(ump);
10955	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
10956		WORKLIST_REMOVE(wk);
10957		atomic_add_long(&dep_write[wk->wk_type], 1);
10958		if (wk == owk)
10959			panic("duplicate worklist: %p\n", wk);
10960		owk = wk;
10961		switch (wk->wk_type) {
10962
10963		case D_PAGEDEP:
10964			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
10965				WORKLIST_INSERT(&reattach, wk);
10966			continue;
10967
10968		case D_INODEDEP:
10969			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
10970				WORKLIST_INSERT(&reattach, wk);
10971			continue;
10972
10973		case D_BMSAFEMAP:
10974			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
10975				WORKLIST_INSERT(&reattach, wk);
10976			continue;
10977
10978		case D_MKDIR:
10979			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
10980			continue;
10981
10982		case D_ALLOCDIRECT:
10983			wk->wk_state |= COMPLETE;
10984			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
10985			continue;
10986
10987		case D_ALLOCINDIR:
10988			wk->wk_state |= COMPLETE;
10989			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
10990			continue;
10991
10992		case D_INDIRDEP:
10993			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
10994				WORKLIST_INSERT(&reattach, wk);
10995			continue;
10996
10997		case D_FREEBLKS:
10998			wk->wk_state |= COMPLETE;
10999			freeblks = WK_FREEBLKS(wk);
11000			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
11001			    LIST_EMPTY(&freeblks->fb_jblkdephd))
11002				add_to_worklist(wk, WK_NODELAY);
11003			continue;
11004
11005		case D_FREEWORK:
11006			handle_written_freework(WK_FREEWORK(wk));
11007			break;
11008
11009		case D_JSEGDEP:
11010			free_jsegdep(WK_JSEGDEP(wk));
11011			continue;
11012
11013		case D_JSEG:
11014			handle_written_jseg(WK_JSEG(wk), bp);
11015			continue;
11016
11017		case D_SBDEP:
11018			if (handle_written_sbdep(WK_SBDEP(wk), bp))
11019				WORKLIST_INSERT(&reattach, wk);
11020			continue;
11021
11022		case D_FREEDEP:
11023			free_freedep(WK_FREEDEP(wk));
11024			continue;
11025
11026		default:
11027			panic("handle_disk_write_complete: Unknown type %s",
11028			    TYPENAME(wk->wk_type));
11029			/* NOTREACHED */
11030		}
11031	}
11032	/*
11033	 * Reattach any requests that must be redone.
11034	 */
11035	while ((wk = LIST_FIRST(&reattach)) != NULL) {
11036		WORKLIST_REMOVE(wk);
11037		WORKLIST_INSERT(&bp->b_dep, wk);
11038	}
11039	FREE_LOCK(ump);
11040	if (sbp)
11041		brelse(sbp);
11042}
11043
11044/*
11045 * Called from within softdep_disk_write_complete above. Note that
11046 * this routine is always called from interrupt level with further
11047 * splbio interrupts blocked.
11048 */
11049static void
11050handle_allocdirect_partdone(adp, wkhd)
11051	struct allocdirect *adp;	/* the completed allocdirect */
11052	struct workhead *wkhd;		/* Work to do when inode is writtne. */
11053{
11054	struct allocdirectlst *listhead;
11055	struct allocdirect *listadp;
11056	struct inodedep *inodedep;
11057	long bsize;
11058
11059	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11060		return;
11061	/*
11062	 * The on-disk inode cannot claim to be any larger than the last
11063	 * fragment that has been written. Otherwise, the on-disk inode
11064	 * might have fragments that were not the last block in the file
11065	 * which would corrupt the filesystem. Thus, we cannot free any
11066	 * allocdirects after one whose ad_oldblkno claims a fragment as
11067	 * these blocks must be rolled back to zero before writing the inode.
11068	 * We check the currently active set of allocdirects in id_inoupdt
11069	 * or id_extupdt as appropriate.
11070	 */
11071	inodedep = adp->ad_inodedep;
11072	bsize = inodedep->id_fs->fs_bsize;
11073	if (adp->ad_state & EXTDATA)
11074		listhead = &inodedep->id_extupdt;
11075	else
11076		listhead = &inodedep->id_inoupdt;
11077	TAILQ_FOREACH(listadp, listhead, ad_next) {
11078		/* found our block */
11079		if (listadp == adp)
11080			break;
11081		/* continue if ad_oldlbn is not a fragment */
11082		if (listadp->ad_oldsize == 0 ||
11083		    listadp->ad_oldsize == bsize)
11084			continue;
11085		/* hit a fragment */
11086		return;
11087	}
11088	/*
11089	 * If we have reached the end of the current list without
11090	 * finding the just finished dependency, then it must be
11091	 * on the future dependency list. Future dependencies cannot
11092	 * be freed until they are moved to the current list.
11093	 */
11094	if (listadp == NULL) {
11095#ifdef DEBUG
11096		if (adp->ad_state & EXTDATA)
11097			listhead = &inodedep->id_newextupdt;
11098		else
11099			listhead = &inodedep->id_newinoupdt;
11100		TAILQ_FOREACH(listadp, listhead, ad_next)
11101			/* found our block */
11102			if (listadp == adp)
11103				break;
11104		if (listadp == NULL)
11105			panic("handle_allocdirect_partdone: lost dep");
11106#endif /* DEBUG */
11107		return;
11108	}
11109	/*
11110	 * If we have found the just finished dependency, then queue
11111	 * it along with anything that follows it that is complete.
11112	 * Since the pointer has not yet been written in the inode
11113	 * as the dependency prevents it, place the allocdirect on the
11114	 * bufwait list where it will be freed once the pointer is
11115	 * valid.
11116	 */
11117	if (wkhd == NULL)
11118		wkhd = &inodedep->id_bufwait;
11119	for (; adp; adp = listadp) {
11120		listadp = TAILQ_NEXT(adp, ad_next);
11121		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11122			return;
11123		TAILQ_REMOVE(listhead, adp, ad_next);
11124		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
11125	}
11126}
11127
11128/*
11129 * Called from within softdep_disk_write_complete above.  This routine
11130 * completes successfully written allocindirs.
11131 */
11132static void
11133handle_allocindir_partdone(aip)
11134	struct allocindir *aip;		/* the completed allocindir */
11135{
11136	struct indirdep *indirdep;
11137
11138	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
11139		return;
11140	indirdep = aip->ai_indirdep;
11141	LIST_REMOVE(aip, ai_next);
11142	/*
11143	 * Don't set a pointer while the buffer is undergoing IO or while
11144	 * we have active truncations.
11145	 */
11146	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
11147		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
11148		return;
11149	}
11150	if (indirdep->ir_state & UFS1FMT)
11151		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11152		    aip->ai_newblkno;
11153	else
11154		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11155		    aip->ai_newblkno;
11156	/*
11157	 * Await the pointer write before freeing the allocindir.
11158	 */
11159	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
11160}
11161
11162/*
11163 * Release segments held on a jwork list.
11164 */
11165static void
11166handle_jwork(wkhd)
11167	struct workhead *wkhd;
11168{
11169	struct worklist *wk;
11170
11171	while ((wk = LIST_FIRST(wkhd)) != NULL) {
11172		WORKLIST_REMOVE(wk);
11173		switch (wk->wk_type) {
11174		case D_JSEGDEP:
11175			free_jsegdep(WK_JSEGDEP(wk));
11176			continue;
11177		case D_FREEDEP:
11178			free_freedep(WK_FREEDEP(wk));
11179			continue;
11180		case D_FREEFRAG:
11181			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
11182			WORKITEM_FREE(wk, D_FREEFRAG);
11183			continue;
11184		case D_FREEWORK:
11185			handle_written_freework(WK_FREEWORK(wk));
11186			continue;
11187		default:
11188			panic("handle_jwork: Unknown type %s\n",
11189			    TYPENAME(wk->wk_type));
11190		}
11191	}
11192}
11193
11194/*
11195 * Handle the bufwait list on an inode when it is safe to release items
11196 * held there.  This normally happens after an inode block is written but
11197 * may be delayed and handled later if there are pending journal items that
11198 * are not yet safe to be released.
11199 */
11200static struct freefile *
11201handle_bufwait(inodedep, refhd)
11202	struct inodedep *inodedep;
11203	struct workhead *refhd;
11204{
11205	struct jaddref *jaddref;
11206	struct freefile *freefile;
11207	struct worklist *wk;
11208
11209	freefile = NULL;
11210	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
11211		WORKLIST_REMOVE(wk);
11212		switch (wk->wk_type) {
11213		case D_FREEFILE:
11214			/*
11215			 * We defer adding freefile to the worklist
11216			 * until all other additions have been made to
11217			 * ensure that it will be done after all the
11218			 * old blocks have been freed.
11219			 */
11220			if (freefile != NULL)
11221				panic("handle_bufwait: freefile");
11222			freefile = WK_FREEFILE(wk);
11223			continue;
11224
11225		case D_MKDIR:
11226			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
11227			continue;
11228
11229		case D_DIRADD:
11230			diradd_inode_written(WK_DIRADD(wk), inodedep);
11231			continue;
11232
11233		case D_FREEFRAG:
11234			wk->wk_state |= COMPLETE;
11235			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
11236				add_to_worklist(wk, 0);
11237			continue;
11238
11239		case D_DIRREM:
11240			wk->wk_state |= COMPLETE;
11241			add_to_worklist(wk, 0);
11242			continue;
11243
11244		case D_ALLOCDIRECT:
11245		case D_ALLOCINDIR:
11246			free_newblk(WK_NEWBLK(wk));
11247			continue;
11248
11249		case D_JNEWBLK:
11250			wk->wk_state |= COMPLETE;
11251			free_jnewblk(WK_JNEWBLK(wk));
11252			continue;
11253
11254		/*
11255		 * Save freed journal segments and add references on
11256		 * the supplied list which will delay their release
11257		 * until the cg bitmap is cleared on disk.
11258		 */
11259		case D_JSEGDEP:
11260			if (refhd == NULL)
11261				free_jsegdep(WK_JSEGDEP(wk));
11262			else
11263				WORKLIST_INSERT(refhd, wk);
11264			continue;
11265
11266		case D_JADDREF:
11267			jaddref = WK_JADDREF(wk);
11268			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11269			    if_deps);
11270			/*
11271			 * Transfer any jaddrefs to the list to be freed with
11272			 * the bitmap if we're handling a removed file.
11273			 */
11274			if (refhd == NULL) {
11275				wk->wk_state |= COMPLETE;
11276				free_jaddref(jaddref);
11277			} else
11278				WORKLIST_INSERT(refhd, wk);
11279			continue;
11280
11281		default:
11282			panic("handle_bufwait: Unknown type %p(%s)",
11283			    wk, TYPENAME(wk->wk_type));
11284			/* NOTREACHED */
11285		}
11286	}
11287	return (freefile);
11288}
11289/*
11290 * Called from within softdep_disk_write_complete above to restore
11291 * in-memory inode block contents to their most up-to-date state. Note
11292 * that this routine is always called from interrupt level with further
11293 * splbio interrupts blocked.
11294 */
11295static int
11296handle_written_inodeblock(inodedep, bp)
11297	struct inodedep *inodedep;
11298	struct buf *bp;		/* buffer containing the inode block */
11299{
11300	struct freefile *freefile;
11301	struct allocdirect *adp, *nextadp;
11302	struct ufs1_dinode *dp1 = NULL;
11303	struct ufs2_dinode *dp2 = NULL;
11304	struct workhead wkhd;
11305	int hadchanges, fstype;
11306	ino_t freelink;
11307
11308	LIST_INIT(&wkhd);
11309	hadchanges = 0;
11310	freefile = NULL;
11311	if ((inodedep->id_state & IOSTARTED) == 0)
11312		panic("handle_written_inodeblock: not started");
11313	inodedep->id_state &= ~IOSTARTED;
11314	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11315		fstype = UFS1;
11316		dp1 = (struct ufs1_dinode *)bp->b_data +
11317		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11318		freelink = dp1->di_freelink;
11319	} else {
11320		fstype = UFS2;
11321		dp2 = (struct ufs2_dinode *)bp->b_data +
11322		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11323		freelink = dp2->di_freelink;
11324	}
11325	/*
11326	 * Leave this inodeblock dirty until it's in the list.
11327	 */
11328	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
11329		struct inodedep *inon;
11330
11331		inon = TAILQ_NEXT(inodedep, id_unlinked);
11332		if ((inon == NULL && freelink == 0) ||
11333		    (inon && inon->id_ino == freelink)) {
11334			if (inon)
11335				inon->id_state |= UNLINKPREV;
11336			inodedep->id_state |= UNLINKNEXT;
11337		}
11338		hadchanges = 1;
11339	}
11340	/*
11341	 * If we had to rollback the inode allocation because of
11342	 * bitmaps being incomplete, then simply restore it.
11343	 * Keep the block dirty so that it will not be reclaimed until
11344	 * all associated dependencies have been cleared and the
11345	 * corresponding updates written to disk.
11346	 */
11347	if (inodedep->id_savedino1 != NULL) {
11348		hadchanges = 1;
11349		if (fstype == UFS1)
11350			*dp1 = *inodedep->id_savedino1;
11351		else
11352			*dp2 = *inodedep->id_savedino2;
11353		free(inodedep->id_savedino1, M_SAVEDINO);
11354		inodedep->id_savedino1 = NULL;
11355		if ((bp->b_flags & B_DELWRI) == 0)
11356			stat_inode_bitmap++;
11357		bdirty(bp);
11358		/*
11359		 * If the inode is clear here and GOINGAWAY it will never
11360		 * be written.  Process the bufwait and clear any pending
11361		 * work which may include the freefile.
11362		 */
11363		if (inodedep->id_state & GOINGAWAY)
11364			goto bufwait;
11365		return (1);
11366	}
11367	inodedep->id_state |= COMPLETE;
11368	/*
11369	 * Roll forward anything that had to be rolled back before
11370	 * the inode could be updated.
11371	 */
11372	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11373		nextadp = TAILQ_NEXT(adp, ad_next);
11374		if (adp->ad_state & ATTACHED)
11375			panic("handle_written_inodeblock: new entry");
11376		if (fstype == UFS1) {
11377			if (adp->ad_offset < NDADDR) {
11378				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11379					panic("%s %s #%jd mismatch %d != %jd",
11380					    "handle_written_inodeblock:",
11381					    "direct pointer",
11382					    (intmax_t)adp->ad_offset,
11383					    dp1->di_db[adp->ad_offset],
11384					    (intmax_t)adp->ad_oldblkno);
11385				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11386			} else {
11387				if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
11388					panic("%s: %s #%jd allocated as %d",
11389					    "handle_written_inodeblock",
11390					    "indirect pointer",
11391					    (intmax_t)adp->ad_offset - NDADDR,
11392					    dp1->di_ib[adp->ad_offset - NDADDR]);
11393				dp1->di_ib[adp->ad_offset - NDADDR] =
11394				    adp->ad_newblkno;
11395			}
11396		} else {
11397			if (adp->ad_offset < NDADDR) {
11398				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11399					panic("%s: %s #%jd %s %jd != %jd",
11400					    "handle_written_inodeblock",
11401					    "direct pointer",
11402					    (intmax_t)adp->ad_offset, "mismatch",
11403					    (intmax_t)dp2->di_db[adp->ad_offset],
11404					    (intmax_t)adp->ad_oldblkno);
11405				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11406			} else {
11407				if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
11408					panic("%s: %s #%jd allocated as %jd",
11409					    "handle_written_inodeblock",
11410					    "indirect pointer",
11411					    (intmax_t)adp->ad_offset - NDADDR,
11412					    (intmax_t)
11413					    dp2->di_ib[adp->ad_offset - NDADDR]);
11414				dp2->di_ib[adp->ad_offset - NDADDR] =
11415				    adp->ad_newblkno;
11416			}
11417		}
11418		adp->ad_state &= ~UNDONE;
11419		adp->ad_state |= ATTACHED;
11420		hadchanges = 1;
11421	}
11422	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11423		nextadp = TAILQ_NEXT(adp, ad_next);
11424		if (adp->ad_state & ATTACHED)
11425			panic("handle_written_inodeblock: new entry");
11426		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11427			panic("%s: direct pointers #%jd %s %jd != %jd",
11428			    "handle_written_inodeblock",
11429			    (intmax_t)adp->ad_offset, "mismatch",
11430			    (intmax_t)dp2->di_extb[adp->ad_offset],
11431			    (intmax_t)adp->ad_oldblkno);
11432		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11433		adp->ad_state &= ~UNDONE;
11434		adp->ad_state |= ATTACHED;
11435		hadchanges = 1;
11436	}
11437	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11438		stat_direct_blk_ptrs++;
11439	/*
11440	 * Reset the file size to its most up-to-date value.
11441	 */
11442	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11443		panic("handle_written_inodeblock: bad size");
11444	if (inodedep->id_savednlink > LINK_MAX)
11445		panic("handle_written_inodeblock: Invalid link count "
11446		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
11447	if (fstype == UFS1) {
11448		if (dp1->di_nlink != inodedep->id_savednlink) {
11449			dp1->di_nlink = inodedep->id_savednlink;
11450			hadchanges = 1;
11451		}
11452		if (dp1->di_size != inodedep->id_savedsize) {
11453			dp1->di_size = inodedep->id_savedsize;
11454			hadchanges = 1;
11455		}
11456	} else {
11457		if (dp2->di_nlink != inodedep->id_savednlink) {
11458			dp2->di_nlink = inodedep->id_savednlink;
11459			hadchanges = 1;
11460		}
11461		if (dp2->di_size != inodedep->id_savedsize) {
11462			dp2->di_size = inodedep->id_savedsize;
11463			hadchanges = 1;
11464		}
11465		if (dp2->di_extsize != inodedep->id_savedextsize) {
11466			dp2->di_extsize = inodedep->id_savedextsize;
11467			hadchanges = 1;
11468		}
11469	}
11470	inodedep->id_savedsize = -1;
11471	inodedep->id_savedextsize = -1;
11472	inodedep->id_savednlink = -1;
11473	/*
11474	 * If there were any rollbacks in the inode block, then it must be
11475	 * marked dirty so that its will eventually get written back in
11476	 * its correct form.
11477	 */
11478	if (hadchanges)
11479		bdirty(bp);
11480bufwait:
11481	/*
11482	 * Process any allocdirects that completed during the update.
11483	 */
11484	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11485		handle_allocdirect_partdone(adp, &wkhd);
11486	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11487		handle_allocdirect_partdone(adp, &wkhd);
11488	/*
11489	 * Process deallocations that were held pending until the
11490	 * inode had been written to disk. Freeing of the inode
11491	 * is delayed until after all blocks have been freed to
11492	 * avoid creation of new <vfsid, inum, lbn> triples
11493	 * before the old ones have been deleted.  Completely
11494	 * unlinked inodes are not processed until the unlinked
11495	 * inode list is written or the last reference is removed.
11496	 */
11497	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11498		freefile = handle_bufwait(inodedep, NULL);
11499		if (freefile && !LIST_EMPTY(&wkhd)) {
11500			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11501			freefile = NULL;
11502		}
11503	}
11504	/*
11505	 * Move rolled forward dependency completions to the bufwait list
11506	 * now that those that were already written have been processed.
11507	 */
11508	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11509		panic("handle_written_inodeblock: bufwait but no changes");
11510	jwork_move(&inodedep->id_bufwait, &wkhd);
11511
11512	if (freefile != NULL) {
11513		/*
11514		 * If the inode is goingaway it was never written.  Fake up
11515		 * the state here so free_inodedep() can succeed.
11516		 */
11517		if (inodedep->id_state & GOINGAWAY)
11518			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11519		if (free_inodedep(inodedep) == 0)
11520			panic("handle_written_inodeblock: live inodedep %p",
11521			    inodedep);
11522		add_to_worklist(&freefile->fx_list, 0);
11523		return (0);
11524	}
11525
11526	/*
11527	 * If no outstanding dependencies, free it.
11528	 */
11529	if (free_inodedep(inodedep) ||
11530	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
11531	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
11532	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
11533	     LIST_FIRST(&inodedep->id_bufwait) == 0))
11534		return (0);
11535	return (hadchanges);
11536}
11537
11538static int
11539handle_written_indirdep(indirdep, bp, bpp)
11540	struct indirdep *indirdep;
11541	struct buf *bp;
11542	struct buf **bpp;
11543{
11544	struct allocindir *aip;
11545	struct buf *sbp;
11546	int chgs;
11547
11548	if (indirdep->ir_state & GOINGAWAY)
11549		panic("handle_written_indirdep: indirdep gone");
11550	if ((indirdep->ir_state & IOSTARTED) == 0)
11551		panic("handle_written_indirdep: IO not started");
11552	chgs = 0;
11553	/*
11554	 * If there were rollbacks revert them here.
11555	 */
11556	if (indirdep->ir_saveddata) {
11557		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11558		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11559			free(indirdep->ir_saveddata, M_INDIRDEP);
11560			indirdep->ir_saveddata = NULL;
11561		}
11562		chgs = 1;
11563	}
11564	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11565	indirdep->ir_state |= ATTACHED;
11566	/*
11567	 * Move allocindirs with written pointers to the completehd if
11568	 * the indirdep's pointer is not yet written.  Otherwise
11569	 * free them here.
11570	 */
11571	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) {
11572		LIST_REMOVE(aip, ai_next);
11573		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11574			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11575			    ai_next);
11576			newblk_freefrag(&aip->ai_block);
11577			continue;
11578		}
11579		free_newblk(&aip->ai_block);
11580	}
11581	/*
11582	 * Move allocindirs that have finished dependency processing from
11583	 * the done list to the write list after updating the pointers.
11584	 */
11585	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11586		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) {
11587			handle_allocindir_partdone(aip);
11588			if (aip == LIST_FIRST(&indirdep->ir_donehd))
11589				panic("disk_write_complete: not gone");
11590			chgs = 1;
11591		}
11592	}
11593	/*
11594	 * Preserve the indirdep if there were any changes or if it is not
11595	 * yet valid on disk.
11596	 */
11597	if (chgs) {
11598		stat_indir_blk_ptrs++;
11599		bdirty(bp);
11600		return (1);
11601	}
11602	/*
11603	 * If there were no changes we can discard the savedbp and detach
11604	 * ourselves from the buf.  We are only carrying completed pointers
11605	 * in this case.
11606	 */
11607	sbp = indirdep->ir_savebp;
11608	sbp->b_flags |= B_INVAL | B_NOCACHE;
11609	indirdep->ir_savebp = NULL;
11610	indirdep->ir_bp = NULL;
11611	if (*bpp != NULL)
11612		panic("handle_written_indirdep: bp already exists.");
11613	*bpp = sbp;
11614	/*
11615	 * The indirdep may not be freed until its parent points at it.
11616	 */
11617	if (indirdep->ir_state & DEPCOMPLETE)
11618		free_indirdep(indirdep);
11619
11620	return (0);
11621}
11622
11623/*
11624 * Process a diradd entry after its dependent inode has been written.
11625 * This routine must be called with splbio interrupts blocked.
11626 */
11627static void
11628diradd_inode_written(dap, inodedep)
11629	struct diradd *dap;
11630	struct inodedep *inodedep;
11631{
11632
11633	dap->da_state |= COMPLETE;
11634	complete_diradd(dap);
11635	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
11636}
11637
11638/*
11639 * Returns true if the bmsafemap will have rollbacks when written.  Must only
11640 * be called with the per-filesystem lock and the buf lock on the cg held.
11641 */
11642static int
11643bmsafemap_backgroundwrite(bmsafemap, bp)
11644	struct bmsafemap *bmsafemap;
11645	struct buf *bp;
11646{
11647	int dirty;
11648
11649	LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
11650	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
11651	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
11652	/*
11653	 * If we're initiating a background write we need to process the
11654	 * rollbacks as they exist now, not as they exist when IO starts.
11655	 * No other consumers will look at the contents of the shadowed
11656	 * buf so this is safe to do here.
11657	 */
11658	if (bp->b_xflags & BX_BKGRDMARKER)
11659		initiate_write_bmsafemap(bmsafemap, bp);
11660
11661	return (dirty);
11662}
11663
11664/*
11665 * Re-apply an allocation when a cg write is complete.
11666 */
11667static int
11668jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
11669	struct jnewblk *jnewblk;
11670	struct fs *fs;
11671	struct cg *cgp;
11672	uint8_t *blksfree;
11673{
11674	ufs1_daddr_t fragno;
11675	ufs2_daddr_t blkno;
11676	long cgbno, bbase;
11677	int frags, blk;
11678	int i;
11679
11680	frags = 0;
11681	cgbno = dtogd(fs, jnewblk->jn_blkno);
11682	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
11683		if (isclr(blksfree, cgbno + i))
11684			panic("jnewblk_rollforward: re-allocated fragment");
11685		frags++;
11686	}
11687	if (frags == fs->fs_frag) {
11688		blkno = fragstoblks(fs, cgbno);
11689		ffs_clrblock(fs, blksfree, (long)blkno);
11690		ffs_clusteracct(fs, cgp, blkno, -1);
11691		cgp->cg_cs.cs_nbfree--;
11692	} else {
11693		bbase = cgbno - fragnum(fs, cgbno);
11694		cgbno += jnewblk->jn_oldfrags;
11695                /* If a complete block had been reassembled, account for it. */
11696		fragno = fragstoblks(fs, bbase);
11697		if (ffs_isblock(fs, blksfree, fragno)) {
11698			cgp->cg_cs.cs_nffree += fs->fs_frag;
11699			ffs_clusteracct(fs, cgp, fragno, -1);
11700			cgp->cg_cs.cs_nbfree--;
11701		}
11702		/* Decrement the old frags.  */
11703		blk = blkmap(fs, blksfree, bbase);
11704		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11705		/* Allocate the fragment */
11706		for (i = 0; i < frags; i++)
11707			clrbit(blksfree, cgbno + i);
11708		cgp->cg_cs.cs_nffree -= frags;
11709		/* Add back in counts associated with the new frags */
11710		blk = blkmap(fs, blksfree, bbase);
11711		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11712	}
11713	return (frags);
11714}
11715
11716/*
11717 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
11718 * changes if it's not a background write.  Set all written dependencies
11719 * to DEPCOMPLETE and free the structure if possible.
11720 */
11721static int
11722handle_written_bmsafemap(bmsafemap, bp)
11723	struct bmsafemap *bmsafemap;
11724	struct buf *bp;
11725{
11726	struct newblk *newblk;
11727	struct inodedep *inodedep;
11728	struct jaddref *jaddref, *jatmp;
11729	struct jnewblk *jnewblk, *jntmp;
11730	struct ufsmount *ump;
11731	uint8_t *inosused;
11732	uint8_t *blksfree;
11733	struct cg *cgp;
11734	struct fs *fs;
11735	ino_t ino;
11736	int foreground;
11737	int chgs;
11738
11739	if ((bmsafemap->sm_state & IOSTARTED) == 0)
11740		panic("initiate_write_bmsafemap: Not started\n");
11741	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
11742	chgs = 0;
11743	bmsafemap->sm_state &= ~IOSTARTED;
11744	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
11745	/*
11746	 * Release journal work that was waiting on the write.
11747	 */
11748	handle_jwork(&bmsafemap->sm_freewr);
11749
11750	/*
11751	 * Restore unwritten inode allocation pending jaddref writes.
11752	 */
11753	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
11754		cgp = (struct cg *)bp->b_data;
11755		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11756		inosused = cg_inosused(cgp);
11757		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
11758		    ja_bmdeps, jatmp) {
11759			if ((jaddref->ja_state & UNDONE) == 0)
11760				continue;
11761			ino = jaddref->ja_ino % fs->fs_ipg;
11762			if (isset(inosused, ino))
11763				panic("handle_written_bmsafemap: "
11764				    "re-allocated inode");
11765			/* Do the roll-forward only if it's a real copy. */
11766			if (foreground) {
11767				if ((jaddref->ja_mode & IFMT) == IFDIR)
11768					cgp->cg_cs.cs_ndir++;
11769				cgp->cg_cs.cs_nifree--;
11770				setbit(inosused, ino);
11771				chgs = 1;
11772			}
11773			jaddref->ja_state &= ~UNDONE;
11774			jaddref->ja_state |= ATTACHED;
11775			free_jaddref(jaddref);
11776		}
11777	}
11778	/*
11779	 * Restore any block allocations which are pending journal writes.
11780	 */
11781	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11782		cgp = (struct cg *)bp->b_data;
11783		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11784		blksfree = cg_blksfree(cgp);
11785		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
11786		    jntmp) {
11787			if ((jnewblk->jn_state & UNDONE) == 0)
11788				continue;
11789			/* Do the roll-forward only if it's a real copy. */
11790			if (foreground &&
11791			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
11792				chgs = 1;
11793			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
11794			jnewblk->jn_state |= ATTACHED;
11795			free_jnewblk(jnewblk);
11796		}
11797	}
11798	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
11799		newblk->nb_state |= DEPCOMPLETE;
11800		newblk->nb_state &= ~ONDEPLIST;
11801		newblk->nb_bmsafemap = NULL;
11802		LIST_REMOVE(newblk, nb_deps);
11803		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
11804			handle_allocdirect_partdone(
11805			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
11806		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
11807			handle_allocindir_partdone(
11808			    WK_ALLOCINDIR(&newblk->nb_list));
11809		else if (newblk->nb_list.wk_type != D_NEWBLK)
11810			panic("handle_written_bmsafemap: Unexpected type: %s",
11811			    TYPENAME(newblk->nb_list.wk_type));
11812	}
11813	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
11814		inodedep->id_state |= DEPCOMPLETE;
11815		inodedep->id_state &= ~ONDEPLIST;
11816		LIST_REMOVE(inodedep, id_deps);
11817		inodedep->id_bmsafemap = NULL;
11818	}
11819	LIST_REMOVE(bmsafemap, sm_next);
11820	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
11821	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
11822	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
11823	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
11824	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
11825		LIST_REMOVE(bmsafemap, sm_hash);
11826		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
11827		return (0);
11828	}
11829	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
11830	if (foreground)
11831		bdirty(bp);
11832	return (1);
11833}
11834
11835/*
11836 * Try to free a mkdir dependency.
11837 */
11838static void
11839complete_mkdir(mkdir)
11840	struct mkdir *mkdir;
11841{
11842	struct diradd *dap;
11843
11844	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
11845		return;
11846	LIST_REMOVE(mkdir, md_mkdirs);
11847	dap = mkdir->md_diradd;
11848	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
11849	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
11850		dap->da_state |= DEPCOMPLETE;
11851		complete_diradd(dap);
11852	}
11853	WORKITEM_FREE(mkdir, D_MKDIR);
11854}
11855
11856/*
11857 * Handle the completion of a mkdir dependency.
11858 */
11859static void
11860handle_written_mkdir(mkdir, type)
11861	struct mkdir *mkdir;
11862	int type;
11863{
11864
11865	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
11866		panic("handle_written_mkdir: bad type");
11867	mkdir->md_state |= COMPLETE;
11868	complete_mkdir(mkdir);
11869}
11870
11871static int
11872free_pagedep(pagedep)
11873	struct pagedep *pagedep;
11874{
11875	int i;
11876
11877	if (pagedep->pd_state & NEWBLOCK)
11878		return (0);
11879	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
11880		return (0);
11881	for (i = 0; i < DAHASHSZ; i++)
11882		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
11883			return (0);
11884	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
11885		return (0);
11886	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
11887		return (0);
11888	if (pagedep->pd_state & ONWORKLIST)
11889		WORKLIST_REMOVE(&pagedep->pd_list);
11890	LIST_REMOVE(pagedep, pd_hash);
11891	WORKITEM_FREE(pagedep, D_PAGEDEP);
11892
11893	return (1);
11894}
11895
11896/*
11897 * Called from within softdep_disk_write_complete above.
11898 * A write operation was just completed. Removed inodes can
11899 * now be freed and associated block pointers may be committed.
11900 * Note that this routine is always called from interrupt level
11901 * with further splbio interrupts blocked.
11902 */
11903static int
11904handle_written_filepage(pagedep, bp)
11905	struct pagedep *pagedep;
11906	struct buf *bp;		/* buffer containing the written page */
11907{
11908	struct dirrem *dirrem;
11909	struct diradd *dap, *nextdap;
11910	struct direct *ep;
11911	int i, chgs;
11912
11913	if ((pagedep->pd_state & IOSTARTED) == 0)
11914		panic("handle_written_filepage: not started");
11915	pagedep->pd_state &= ~IOSTARTED;
11916	/*
11917	 * Process any directory removals that have been committed.
11918	 */
11919	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
11920		LIST_REMOVE(dirrem, dm_next);
11921		dirrem->dm_state |= COMPLETE;
11922		dirrem->dm_dirinum = pagedep->pd_ino;
11923		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
11924		    ("handle_written_filepage: Journal entries not written."));
11925		add_to_worklist(&dirrem->dm_list, 0);
11926	}
11927	/*
11928	 * Free any directory additions that have been committed.
11929	 * If it is a newly allocated block, we have to wait until
11930	 * the on-disk directory inode claims the new block.
11931	 */
11932	if ((pagedep->pd_state & NEWBLOCK) == 0)
11933		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
11934			free_diradd(dap, NULL);
11935	/*
11936	 * Uncommitted directory entries must be restored.
11937	 */
11938	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
11939		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
11940		     dap = nextdap) {
11941			nextdap = LIST_NEXT(dap, da_pdlist);
11942			if (dap->da_state & ATTACHED)
11943				panic("handle_written_filepage: attached");
11944			ep = (struct direct *)
11945			    ((char *)bp->b_data + dap->da_offset);
11946			ep->d_ino = dap->da_newinum;
11947			dap->da_state &= ~UNDONE;
11948			dap->da_state |= ATTACHED;
11949			chgs = 1;
11950			/*
11951			 * If the inode referenced by the directory has
11952			 * been written out, then the dependency can be
11953			 * moved to the pending list.
11954			 */
11955			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
11956				LIST_REMOVE(dap, da_pdlist);
11957				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
11958				    da_pdlist);
11959			}
11960		}
11961	}
11962	/*
11963	 * If there were any rollbacks in the directory, then it must be
11964	 * marked dirty so that its will eventually get written back in
11965	 * its correct form.
11966	 */
11967	if (chgs) {
11968		if ((bp->b_flags & B_DELWRI) == 0)
11969			stat_dir_entry++;
11970		bdirty(bp);
11971		return (1);
11972	}
11973	/*
11974	 * If we are not waiting for a new directory block to be
11975	 * claimed by its inode, then the pagedep will be freed.
11976	 * Otherwise it will remain to track any new entries on
11977	 * the page in case they are fsync'ed.
11978	 */
11979	free_pagedep(pagedep);
11980	return (0);
11981}
11982
11983/*
11984 * Writing back in-core inode structures.
11985 *
11986 * The filesystem only accesses an inode's contents when it occupies an
11987 * "in-core" inode structure.  These "in-core" structures are separate from
11988 * the page frames used to cache inode blocks.  Only the latter are
11989 * transferred to/from the disk.  So, when the updated contents of the
11990 * "in-core" inode structure are copied to the corresponding in-memory inode
11991 * block, the dependencies are also transferred.  The following procedure is
11992 * called when copying a dirty "in-core" inode to a cached inode block.
11993 */
11994
11995/*
11996 * Called when an inode is loaded from disk. If the effective link count
11997 * differed from the actual link count when it was last flushed, then we
11998 * need to ensure that the correct effective link count is put back.
11999 */
12000void
12001softdep_load_inodeblock(ip)
12002	struct inode *ip;	/* the "in_core" copy of the inode */
12003{
12004	struct inodedep *inodedep;
12005
12006	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
12007	    ("softdep_load_inodeblock called on non-softdep filesystem"));
12008	/*
12009	 * Check for alternate nlink count.
12010	 */
12011	ip->i_effnlink = ip->i_nlink;
12012	ACQUIRE_LOCK(ip->i_ump);
12013	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
12014	    &inodedep) == 0) {
12015		FREE_LOCK(ip->i_ump);
12016		return;
12017	}
12018	ip->i_effnlink -= inodedep->id_nlinkdelta;
12019	FREE_LOCK(ip->i_ump);
12020}
12021
12022/*
12023 * This routine is called just before the "in-core" inode
12024 * information is to be copied to the in-memory inode block.
12025 * Recall that an inode block contains several inodes. If
12026 * the force flag is set, then the dependencies will be
12027 * cleared so that the update can always be made. Note that
12028 * the buffer is locked when this routine is called, so we
12029 * will never be in the middle of writing the inode block
12030 * to disk.
12031 */
12032void
12033softdep_update_inodeblock(ip, bp, waitfor)
12034	struct inode *ip;	/* the "in_core" copy of the inode */
12035	struct buf *bp;		/* the buffer containing the inode block */
12036	int waitfor;		/* nonzero => update must be allowed */
12037{
12038	struct inodedep *inodedep;
12039	struct inoref *inoref;
12040	struct ufsmount *ump;
12041	struct worklist *wk;
12042	struct mount *mp;
12043	struct buf *ibp;
12044	struct fs *fs;
12045	int error;
12046
12047	ump = ip->i_ump;
12048	mp = UFSTOVFS(ump);
12049	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
12050	    ("softdep_update_inodeblock called on non-softdep filesystem"));
12051	fs = ip->i_fs;
12052	/*
12053	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
12054	 * does not have access to the in-core ip so must write directly into
12055	 * the inode block buffer when setting freelink.
12056	 */
12057	if (fs->fs_magic == FS_UFS1_MAGIC)
12058		DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
12059		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12060	else
12061		DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
12062		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12063	/*
12064	 * If the effective link count is not equal to the actual link
12065	 * count, then we must track the difference in an inodedep while
12066	 * the inode is (potentially) tossed out of the cache. Otherwise,
12067	 * if there is no existing inodedep, then there are no dependencies
12068	 * to track.
12069	 */
12070	ACQUIRE_LOCK(ump);
12071again:
12072	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12073		FREE_LOCK(ump);
12074		if (ip->i_effnlink != ip->i_nlink)
12075			panic("softdep_update_inodeblock: bad link count");
12076		return;
12077	}
12078	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
12079		panic("softdep_update_inodeblock: bad delta");
12080	/*
12081	 * If we're flushing all dependencies we must also move any waiting
12082	 * for journal writes onto the bufwait list prior to I/O.
12083	 */
12084	if (waitfor) {
12085		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12086			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12087			    == DEPCOMPLETE) {
12088				jwait(&inoref->if_list, MNT_WAIT);
12089				goto again;
12090			}
12091		}
12092	}
12093	/*
12094	 * Changes have been initiated. Anything depending on these
12095	 * changes cannot occur until this inode has been written.
12096	 */
12097	inodedep->id_state &= ~COMPLETE;
12098	if ((inodedep->id_state & ONWORKLIST) == 0)
12099		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
12100	/*
12101	 * Any new dependencies associated with the incore inode must
12102	 * now be moved to the list associated with the buffer holding
12103	 * the in-memory copy of the inode. Once merged process any
12104	 * allocdirects that are completed by the merger.
12105	 */
12106	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
12107	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
12108		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
12109		    NULL);
12110	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
12111	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
12112		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
12113		    NULL);
12114	/*
12115	 * Now that the inode has been pushed into the buffer, the
12116	 * operations dependent on the inode being written to disk
12117	 * can be moved to the id_bufwait so that they will be
12118	 * processed when the buffer I/O completes.
12119	 */
12120	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
12121		WORKLIST_REMOVE(wk);
12122		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
12123	}
12124	/*
12125	 * Newly allocated inodes cannot be written until the bitmap
12126	 * that allocates them have been written (indicated by
12127	 * DEPCOMPLETE being set in id_state). If we are doing a
12128	 * forced sync (e.g., an fsync on a file), we force the bitmap
12129	 * to be written so that the update can be done.
12130	 */
12131	if (waitfor == 0) {
12132		FREE_LOCK(ump);
12133		return;
12134	}
12135retry:
12136	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
12137		FREE_LOCK(ump);
12138		return;
12139	}
12140	ibp = inodedep->id_bmsafemap->sm_buf;
12141	ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
12142	if (ibp == NULL) {
12143		/*
12144		 * If ibp came back as NULL, the dependency could have been
12145		 * freed while we slept.  Look it up again, and check to see
12146		 * that it has completed.
12147		 */
12148		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
12149			goto retry;
12150		FREE_LOCK(ump);
12151		return;
12152	}
12153	FREE_LOCK(ump);
12154	if ((error = bwrite(ibp)) != 0)
12155		softdep_error("softdep_update_inodeblock: bwrite", error);
12156}
12157
12158/*
12159 * Merge the a new inode dependency list (such as id_newinoupdt) into an
12160 * old inode dependency list (such as id_inoupdt). This routine must be
12161 * called with splbio interrupts blocked.
12162 */
12163static void
12164merge_inode_lists(newlisthead, oldlisthead)
12165	struct allocdirectlst *newlisthead;
12166	struct allocdirectlst *oldlisthead;
12167{
12168	struct allocdirect *listadp, *newadp;
12169
12170	newadp = TAILQ_FIRST(newlisthead);
12171	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
12172		if (listadp->ad_offset < newadp->ad_offset) {
12173			listadp = TAILQ_NEXT(listadp, ad_next);
12174			continue;
12175		}
12176		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12177		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
12178		if (listadp->ad_offset == newadp->ad_offset) {
12179			allocdirect_merge(oldlisthead, newadp,
12180			    listadp);
12181			listadp = newadp;
12182		}
12183		newadp = TAILQ_FIRST(newlisthead);
12184	}
12185	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
12186		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12187		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
12188	}
12189}
12190
12191/*
12192 * If we are doing an fsync, then we must ensure that any directory
12193 * entries for the inode have been written after the inode gets to disk.
12194 */
12195int
12196softdep_fsync(vp)
12197	struct vnode *vp;	/* the "in_core" copy of the inode */
12198{
12199	struct inodedep *inodedep;
12200	struct pagedep *pagedep;
12201	struct inoref *inoref;
12202	struct ufsmount *ump;
12203	struct worklist *wk;
12204	struct diradd *dap;
12205	struct mount *mp;
12206	struct vnode *pvp;
12207	struct inode *ip;
12208	struct buf *bp;
12209	struct fs *fs;
12210	struct thread *td = curthread;
12211	int error, flushparent, pagedep_new_block;
12212	ino_t parentino;
12213	ufs_lbn_t lbn;
12214
12215	ip = VTOI(vp);
12216	fs = ip->i_fs;
12217	ump = ip->i_ump;
12218	mp = vp->v_mount;
12219	if (MOUNTEDSOFTDEP(mp) == 0)
12220		return (0);
12221	ACQUIRE_LOCK(ump);
12222restart:
12223	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12224		FREE_LOCK(ump);
12225		return (0);
12226	}
12227	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12228		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12229		    == DEPCOMPLETE) {
12230			jwait(&inoref->if_list, MNT_WAIT);
12231			goto restart;
12232		}
12233	}
12234	if (!LIST_EMPTY(&inodedep->id_inowait) ||
12235	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12236	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12237	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12238	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12239		panic("softdep_fsync: pending ops %p", inodedep);
12240	for (error = 0, flushparent = 0; ; ) {
12241		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12242			break;
12243		if (wk->wk_type != D_DIRADD)
12244			panic("softdep_fsync: Unexpected type %s",
12245			    TYPENAME(wk->wk_type));
12246		dap = WK_DIRADD(wk);
12247		/*
12248		 * Flush our parent if this directory entry has a MKDIR_PARENT
12249		 * dependency or is contained in a newly allocated block.
12250		 */
12251		if (dap->da_state & DIRCHG)
12252			pagedep = dap->da_previous->dm_pagedep;
12253		else
12254			pagedep = dap->da_pagedep;
12255		parentino = pagedep->pd_ino;
12256		lbn = pagedep->pd_lbn;
12257		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12258			panic("softdep_fsync: dirty");
12259		if ((dap->da_state & MKDIR_PARENT) ||
12260		    (pagedep->pd_state & NEWBLOCK))
12261			flushparent = 1;
12262		else
12263			flushparent = 0;
12264		/*
12265		 * If we are being fsync'ed as part of vgone'ing this vnode,
12266		 * then we will not be able to release and recover the
12267		 * vnode below, so we just have to give up on writing its
12268		 * directory entry out. It will eventually be written, just
12269		 * not now, but then the user was not asking to have it
12270		 * written, so we are not breaking any promises.
12271		 */
12272		if (vp->v_iflag & VI_DOOMED)
12273			break;
12274		/*
12275		 * We prevent deadlock by always fetching inodes from the
12276		 * root, moving down the directory tree. Thus, when fetching
12277		 * our parent directory, we first try to get the lock. If
12278		 * that fails, we must unlock ourselves before requesting
12279		 * the lock on our parent. See the comment in ufs_lookup
12280		 * for details on possible races.
12281		 */
12282		FREE_LOCK(ump);
12283		if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
12284		    FFSV_FORCEINSMQ)) {
12285			error = vfs_busy(mp, MBF_NOWAIT);
12286			if (error != 0) {
12287				vfs_ref(mp);
12288				VOP_UNLOCK(vp, 0);
12289				error = vfs_busy(mp, 0);
12290				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12291				vfs_rel(mp);
12292				if (error != 0)
12293					return (ENOENT);
12294				if (vp->v_iflag & VI_DOOMED) {
12295					vfs_unbusy(mp);
12296					return (ENOENT);
12297				}
12298			}
12299			VOP_UNLOCK(vp, 0);
12300			error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
12301			    &pvp, FFSV_FORCEINSMQ);
12302			vfs_unbusy(mp);
12303			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12304			if (vp->v_iflag & VI_DOOMED) {
12305				if (error == 0)
12306					vput(pvp);
12307				error = ENOENT;
12308			}
12309			if (error != 0)
12310				return (error);
12311		}
12312		/*
12313		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12314		 * that are contained in direct blocks will be resolved by
12315		 * doing a ffs_update. Pagedeps contained in indirect blocks
12316		 * may require a complete sync'ing of the directory. So, we
12317		 * try the cheap and fast ffs_update first, and if that fails,
12318		 * then we do the slower ffs_syncvnode of the directory.
12319		 */
12320		if (flushparent) {
12321			int locked;
12322
12323			if ((error = ffs_update(pvp, 1)) != 0) {
12324				vput(pvp);
12325				return (error);
12326			}
12327			ACQUIRE_LOCK(ump);
12328			locked = 1;
12329			if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12330				if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12331					if (wk->wk_type != D_DIRADD)
12332						panic("softdep_fsync: Unexpected type %s",
12333						      TYPENAME(wk->wk_type));
12334					dap = WK_DIRADD(wk);
12335					if (dap->da_state & DIRCHG)
12336						pagedep = dap->da_previous->dm_pagedep;
12337					else
12338						pagedep = dap->da_pagedep;
12339					pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12340					FREE_LOCK(ump);
12341					locked = 0;
12342					if (pagedep_new_block && (error =
12343					    ffs_syncvnode(pvp, MNT_WAIT, 0))) {
12344						vput(pvp);
12345						return (error);
12346					}
12347				}
12348			}
12349			if (locked)
12350				FREE_LOCK(ump);
12351		}
12352		/*
12353		 * Flush directory page containing the inode's name.
12354		 */
12355		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12356		    &bp);
12357		if (error == 0)
12358			error = bwrite(bp);
12359		else
12360			brelse(bp);
12361		vput(pvp);
12362		if (error != 0)
12363			return (error);
12364		ACQUIRE_LOCK(ump);
12365		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12366			break;
12367	}
12368	FREE_LOCK(ump);
12369	return (0);
12370}
12371
12372/*
12373 * Flush all the dirty bitmaps associated with the block device
12374 * before flushing the rest of the dirty blocks so as to reduce
12375 * the number of dependencies that will have to be rolled back.
12376 *
12377 * XXX Unused?
12378 */
12379void
12380softdep_fsync_mountdev(vp)
12381	struct vnode *vp;
12382{
12383	struct buf *bp, *nbp;
12384	struct worklist *wk;
12385	struct bufobj *bo;
12386
12387	if (!vn_isdisk(vp, NULL))
12388		panic("softdep_fsync_mountdev: vnode not a disk");
12389	bo = &vp->v_bufobj;
12390restart:
12391	BO_LOCK(bo);
12392	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12393		/*
12394		 * If it is already scheduled, skip to the next buffer.
12395		 */
12396		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12397			continue;
12398
12399		if ((bp->b_flags & B_DELWRI) == 0)
12400			panic("softdep_fsync_mountdev: not dirty");
12401		/*
12402		 * We are only interested in bitmaps with outstanding
12403		 * dependencies.
12404		 */
12405		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12406		    wk->wk_type != D_BMSAFEMAP ||
12407		    (bp->b_vflags & BV_BKGRDINPROG)) {
12408			BUF_UNLOCK(bp);
12409			continue;
12410		}
12411		BO_UNLOCK(bo);
12412		bremfree(bp);
12413		(void) bawrite(bp);
12414		goto restart;
12415	}
12416	drain_output(vp);
12417	BO_UNLOCK(bo);
12418}
12419
12420/*
12421 * Sync all cylinder groups that were dirty at the time this function is
12422 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
12423 * is used to flush freedep activity that may be holding up writes to a
12424 * indirect block.
12425 */
12426static int
12427sync_cgs(mp, waitfor)
12428	struct mount *mp;
12429	int waitfor;
12430{
12431	struct bmsafemap *bmsafemap;
12432	struct bmsafemap *sentinel;
12433	struct ufsmount *ump;
12434	struct buf *bp;
12435	int error;
12436
12437	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12438	sentinel->sm_cg = -1;
12439	ump = VFSTOUFS(mp);
12440	error = 0;
12441	ACQUIRE_LOCK(ump);
12442	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12443	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12444	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12445		/* Skip sentinels and cgs with no work to release. */
12446		if (bmsafemap->sm_cg == -1 ||
12447		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12448		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
12449			LIST_REMOVE(sentinel, sm_next);
12450			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12451			continue;
12452		}
12453		/*
12454		 * If we don't get the lock and we're waiting try again, if
12455		 * not move on to the next buf and try to sync it.
12456		 */
12457		bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
12458		if (bp == NULL && waitfor == MNT_WAIT)
12459			continue;
12460		LIST_REMOVE(sentinel, sm_next);
12461		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12462		if (bp == NULL)
12463			continue;
12464		FREE_LOCK(ump);
12465		if (waitfor == MNT_NOWAIT)
12466			bawrite(bp);
12467		else
12468			error = bwrite(bp);
12469		ACQUIRE_LOCK(ump);
12470		if (error)
12471			break;
12472	}
12473	LIST_REMOVE(sentinel, sm_next);
12474	FREE_LOCK(ump);
12475	free(sentinel, M_BMSAFEMAP);
12476	return (error);
12477}
12478
12479/*
12480 * This routine is called when we are trying to synchronously flush a
12481 * file. This routine must eliminate any filesystem metadata dependencies
12482 * so that the syncing routine can succeed.
12483 */
12484int
12485softdep_sync_metadata(struct vnode *vp)
12486{
12487	struct inode *ip;
12488	int error;
12489
12490	ip = VTOI(vp);
12491	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
12492	    ("softdep_sync_metadata called on non-softdep filesystem"));
12493	/*
12494	 * Ensure that any direct block dependencies have been cleared,
12495	 * truncations are started, and inode references are journaled.
12496	 */
12497	ACQUIRE_LOCK(ip->i_ump);
12498	/*
12499	 * Write all journal records to prevent rollbacks on devvp.
12500	 */
12501	if (vp->v_type == VCHR)
12502		softdep_flushjournal(vp->v_mount);
12503	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
12504	/*
12505	 * Ensure that all truncates are written so we won't find deps on
12506	 * indirect blocks.
12507	 */
12508	process_truncates(vp);
12509	FREE_LOCK(ip->i_ump);
12510
12511	return (error);
12512}
12513
12514/*
12515 * This routine is called when we are attempting to sync a buf with
12516 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
12517 * other IO it can but returns EBUSY if the buffer is not yet able to
12518 * be written.  Dependencies which will not cause rollbacks will always
12519 * return 0.
12520 */
12521int
12522softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
12523{
12524	struct indirdep *indirdep;
12525	struct pagedep *pagedep;
12526	struct allocindir *aip;
12527	struct newblk *newblk;
12528	struct ufsmount *ump;
12529	struct buf *nbp;
12530	struct worklist *wk;
12531	int i, error;
12532
12533	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12534	    ("softdep_sync_buf called on non-softdep filesystem"));
12535	/*
12536	 * For VCHR we just don't want to force flush any dependencies that
12537	 * will cause rollbacks.
12538	 */
12539	if (vp->v_type == VCHR) {
12540		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12541			return (EBUSY);
12542		return (0);
12543	}
12544	ump = VTOI(vp)->i_ump;
12545	ACQUIRE_LOCK(ump);
12546	/*
12547	 * As we hold the buffer locked, none of its dependencies
12548	 * will disappear.
12549	 */
12550	error = 0;
12551top:
12552	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12553		switch (wk->wk_type) {
12554
12555		case D_ALLOCDIRECT:
12556		case D_ALLOCINDIR:
12557			newblk = WK_NEWBLK(wk);
12558			if (newblk->nb_jnewblk != NULL) {
12559				if (waitfor == MNT_NOWAIT) {
12560					error = EBUSY;
12561					goto out_unlock;
12562				}
12563				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12564				goto top;
12565			}
12566			if (newblk->nb_state & DEPCOMPLETE ||
12567			    waitfor == MNT_NOWAIT)
12568				continue;
12569			nbp = newblk->nb_bmsafemap->sm_buf;
12570			nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12571			if (nbp == NULL)
12572				goto top;
12573			FREE_LOCK(ump);
12574			if ((error = bwrite(nbp)) != 0)
12575				goto out;
12576			ACQUIRE_LOCK(ump);
12577			continue;
12578
12579		case D_INDIRDEP:
12580			indirdep = WK_INDIRDEP(wk);
12581			if (waitfor == MNT_NOWAIT) {
12582				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
12583				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
12584					error = EBUSY;
12585					goto out_unlock;
12586				}
12587			}
12588			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
12589				panic("softdep_sync_buf: truncation pending.");
12590		restart:
12591			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
12592				newblk = (struct newblk *)aip;
12593				if (newblk->nb_jnewblk != NULL) {
12594					jwait(&newblk->nb_jnewblk->jn_list,
12595					    waitfor);
12596					goto restart;
12597				}
12598				if (newblk->nb_state & DEPCOMPLETE)
12599					continue;
12600				nbp = newblk->nb_bmsafemap->sm_buf;
12601				nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12602				if (nbp == NULL)
12603					goto restart;
12604				FREE_LOCK(ump);
12605				if ((error = bwrite(nbp)) != 0)
12606					goto out;
12607				ACQUIRE_LOCK(ump);
12608				goto restart;
12609			}
12610			continue;
12611
12612		case D_PAGEDEP:
12613			/*
12614			 * Only flush directory entries in synchronous passes.
12615			 */
12616			if (waitfor != MNT_WAIT) {
12617				error = EBUSY;
12618				goto out_unlock;
12619			}
12620			/*
12621			 * While syncing snapshots, we must allow recursive
12622			 * lookups.
12623			 */
12624			BUF_AREC(bp);
12625			/*
12626			 * We are trying to sync a directory that may
12627			 * have dependencies on both its own metadata
12628			 * and/or dependencies on the inodes of any
12629			 * recently allocated files. We walk its diradd
12630			 * lists pushing out the associated inode.
12631			 */
12632			pagedep = WK_PAGEDEP(wk);
12633			for (i = 0; i < DAHASHSZ; i++) {
12634				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
12635					continue;
12636				if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12637				    &pagedep->pd_diraddhd[i]))) {
12638					BUF_NOREC(bp);
12639					goto out_unlock;
12640				}
12641			}
12642			BUF_NOREC(bp);
12643			continue;
12644
12645		case D_FREEWORK:
12646		case D_FREEDEP:
12647		case D_JSEGDEP:
12648		case D_JNEWBLK:
12649			continue;
12650
12651		default:
12652			panic("softdep_sync_buf: Unknown type %s",
12653			    TYPENAME(wk->wk_type));
12654			/* NOTREACHED */
12655		}
12656	}
12657out_unlock:
12658	FREE_LOCK(ump);
12659out:
12660	return (error);
12661}
12662
12663/*
12664 * Flush the dependencies associated with an inodedep.
12665 * Called with splbio blocked.
12666 */
12667static int
12668flush_inodedep_deps(vp, mp, ino)
12669	struct vnode *vp;
12670	struct mount *mp;
12671	ino_t ino;
12672{
12673	struct inodedep *inodedep;
12674	struct inoref *inoref;
12675	struct ufsmount *ump;
12676	int error, waitfor;
12677
12678	/*
12679	 * This work is done in two passes. The first pass grabs most
12680	 * of the buffers and begins asynchronously writing them. The
12681	 * only way to wait for these asynchronous writes is to sleep
12682	 * on the filesystem vnode which may stay busy for a long time
12683	 * if the filesystem is active. So, instead, we make a second
12684	 * pass over the dependencies blocking on each write. In the
12685	 * usual case we will be blocking against a write that we
12686	 * initiated, so when it is done the dependency will have been
12687	 * resolved. Thus the second pass is expected to end quickly.
12688	 * We give a brief window at the top of the loop to allow
12689	 * any pending I/O to complete.
12690	 */
12691	ump = VFSTOUFS(mp);
12692	LOCK_OWNED(ump);
12693	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12694		if (error)
12695			return (error);
12696		FREE_LOCK(ump);
12697		ACQUIRE_LOCK(ump);
12698restart:
12699		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12700			return (0);
12701		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12702			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12703			    == DEPCOMPLETE) {
12704				jwait(&inoref->if_list, MNT_WAIT);
12705				goto restart;
12706			}
12707		}
12708		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12709		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12710		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12711		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12712			continue;
12713		/*
12714		 * If pass2, we are done, otherwise do pass 2.
12715		 */
12716		if (waitfor == MNT_WAIT)
12717			break;
12718		waitfor = MNT_WAIT;
12719	}
12720	/*
12721	 * Try freeing inodedep in case all dependencies have been removed.
12722	 */
12723	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12724		(void) free_inodedep(inodedep);
12725	return (0);
12726}
12727
12728/*
12729 * Flush an inode dependency list.
12730 * Called with splbio blocked.
12731 */
12732static int
12733flush_deplist(listhead, waitfor, errorp)
12734	struct allocdirectlst *listhead;
12735	int waitfor;
12736	int *errorp;
12737{
12738	struct allocdirect *adp;
12739	struct newblk *newblk;
12740	struct ufsmount *ump;
12741	struct buf *bp;
12742
12743	if ((adp = TAILQ_FIRST(listhead)) == NULL)
12744		return (0);
12745	ump = VFSTOUFS(adp->ad_list.wk_mp);
12746	LOCK_OWNED(ump);
12747	TAILQ_FOREACH(adp, listhead, ad_next) {
12748		newblk = (struct newblk *)adp;
12749		if (newblk->nb_jnewblk != NULL) {
12750			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12751			return (1);
12752		}
12753		if (newblk->nb_state & DEPCOMPLETE)
12754			continue;
12755		bp = newblk->nb_bmsafemap->sm_buf;
12756		bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor);
12757		if (bp == NULL) {
12758			if (waitfor == MNT_NOWAIT)
12759				continue;
12760			return (1);
12761		}
12762		FREE_LOCK(ump);
12763		if (waitfor == MNT_NOWAIT)
12764			bawrite(bp);
12765		else
12766			*errorp = bwrite(bp);
12767		ACQUIRE_LOCK(ump);
12768		return (1);
12769	}
12770	return (0);
12771}
12772
12773/*
12774 * Flush dependencies associated with an allocdirect block.
12775 */
12776static int
12777flush_newblk_dep(vp, mp, lbn)
12778	struct vnode *vp;
12779	struct mount *mp;
12780	ufs_lbn_t lbn;
12781{
12782	struct newblk *newblk;
12783	struct ufsmount *ump;
12784	struct bufobj *bo;
12785	struct inode *ip;
12786	struct buf *bp;
12787	ufs2_daddr_t blkno;
12788	int error;
12789
12790	error = 0;
12791	bo = &vp->v_bufobj;
12792	ip = VTOI(vp);
12793	blkno = DIP(ip, i_db[lbn]);
12794	if (blkno == 0)
12795		panic("flush_newblk_dep: Missing block");
12796	ump = VFSTOUFS(mp);
12797	ACQUIRE_LOCK(ump);
12798	/*
12799	 * Loop until all dependencies related to this block are satisfied.
12800	 * We must be careful to restart after each sleep in case a write
12801	 * completes some part of this process for us.
12802	 */
12803	for (;;) {
12804		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
12805			FREE_LOCK(ump);
12806			break;
12807		}
12808		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
12809			panic("flush_newblk_deps: Bad newblk %p", newblk);
12810		/*
12811		 * Flush the journal.
12812		 */
12813		if (newblk->nb_jnewblk != NULL) {
12814			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12815			continue;
12816		}
12817		/*
12818		 * Write the bitmap dependency.
12819		 */
12820		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
12821			bp = newblk->nb_bmsafemap->sm_buf;
12822			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12823			if (bp == NULL)
12824				continue;
12825			FREE_LOCK(ump);
12826			error = bwrite(bp);
12827			if (error)
12828				break;
12829			ACQUIRE_LOCK(ump);
12830			continue;
12831		}
12832		/*
12833		 * Write the buffer.
12834		 */
12835		FREE_LOCK(ump);
12836		BO_LOCK(bo);
12837		bp = gbincore(bo, lbn);
12838		if (bp != NULL) {
12839			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
12840			    LK_INTERLOCK, BO_LOCKPTR(bo));
12841			if (error == ENOLCK) {
12842				ACQUIRE_LOCK(ump);
12843				continue; /* Slept, retry */
12844			}
12845			if (error != 0)
12846				break;	/* Failed */
12847			if (bp->b_flags & B_DELWRI) {
12848				bremfree(bp);
12849				error = bwrite(bp);
12850				if (error)
12851					break;
12852			} else
12853				BUF_UNLOCK(bp);
12854		} else
12855			BO_UNLOCK(bo);
12856		/*
12857		 * We have to wait for the direct pointers to
12858		 * point at the newdirblk before the dependency
12859		 * will go away.
12860		 */
12861		error = ffs_update(vp, 1);
12862		if (error)
12863			break;
12864		ACQUIRE_LOCK(ump);
12865	}
12866	return (error);
12867}
12868
12869/*
12870 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
12871 * Called with splbio blocked.
12872 */
12873static int
12874flush_pagedep_deps(pvp, mp, diraddhdp)
12875	struct vnode *pvp;
12876	struct mount *mp;
12877	struct diraddhd *diraddhdp;
12878{
12879	struct inodedep *inodedep;
12880	struct inoref *inoref;
12881	struct ufsmount *ump;
12882	struct diradd *dap;
12883	struct vnode *vp;
12884	int error = 0;
12885	struct buf *bp;
12886	ino_t inum;
12887	struct diraddhd unfinished;
12888
12889	LIST_INIT(&unfinished);
12890	ump = VFSTOUFS(mp);
12891	LOCK_OWNED(ump);
12892restart:
12893	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12894		/*
12895		 * Flush ourselves if this directory entry
12896		 * has a MKDIR_PARENT dependency.
12897		 */
12898		if (dap->da_state & MKDIR_PARENT) {
12899			FREE_LOCK(ump);
12900			if ((error = ffs_update(pvp, 1)) != 0)
12901				break;
12902			ACQUIRE_LOCK(ump);
12903			/*
12904			 * If that cleared dependencies, go on to next.
12905			 */
12906			if (dap != LIST_FIRST(diraddhdp))
12907				continue;
12908			/*
12909			 * All MKDIR_PARENT dependencies and all the
12910			 * NEWBLOCK pagedeps that are contained in direct
12911			 * blocks were resolved by doing above ffs_update.
12912			 * Pagedeps contained in indirect blocks may
12913			 * require a complete sync'ing of the directory.
12914			 * We are in the midst of doing a complete sync,
12915			 * so if they are not resolved in this pass we
12916			 * defer them for now as they will be sync'ed by
12917			 * our caller shortly.
12918			 */
12919			LIST_REMOVE(dap, da_pdlist);
12920			LIST_INSERT_HEAD(&unfinished, dap, da_pdlist);
12921			continue;
12922		}
12923		/*
12924		 * A newly allocated directory must have its "." and
12925		 * ".." entries written out before its name can be
12926		 * committed in its parent.
12927		 */
12928		inum = dap->da_newinum;
12929		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12930			panic("flush_pagedep_deps: lost inode1");
12931		/*
12932		 * Wait for any pending journal adds to complete so we don't
12933		 * cause rollbacks while syncing.
12934		 */
12935		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12936			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12937			    == DEPCOMPLETE) {
12938				jwait(&inoref->if_list, MNT_WAIT);
12939				goto restart;
12940			}
12941		}
12942		if (dap->da_state & MKDIR_BODY) {
12943			FREE_LOCK(ump);
12944			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12945			    FFSV_FORCEINSMQ)))
12946				break;
12947			error = flush_newblk_dep(vp, mp, 0);
12948			/*
12949			 * If we still have the dependency we might need to
12950			 * update the vnode to sync the new link count to
12951			 * disk.
12952			 */
12953			if (error == 0 && dap == LIST_FIRST(diraddhdp))
12954				error = ffs_update(vp, 1);
12955			vput(vp);
12956			if (error != 0)
12957				break;
12958			ACQUIRE_LOCK(ump);
12959			/*
12960			 * If that cleared dependencies, go on to next.
12961			 */
12962			if (dap != LIST_FIRST(diraddhdp))
12963				continue;
12964			if (dap->da_state & MKDIR_BODY) {
12965				inodedep_lookup(UFSTOVFS(ump), inum, 0,
12966				    &inodedep);
12967				panic("flush_pagedep_deps: MKDIR_BODY "
12968				    "inodedep %p dap %p vp %p",
12969				    inodedep, dap, vp);
12970			}
12971		}
12972		/*
12973		 * Flush the inode on which the directory entry depends.
12974		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12975		 * the only remaining dependency is that the updated inode
12976		 * count must get pushed to disk. The inode has already
12977		 * been pushed into its inode buffer (via VOP_UPDATE) at
12978		 * the time of the reference count change. So we need only
12979		 * locate that buffer, ensure that there will be no rollback
12980		 * caused by a bitmap dependency, then write the inode buffer.
12981		 */
12982retry:
12983		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12984			panic("flush_pagedep_deps: lost inode");
12985		/*
12986		 * If the inode still has bitmap dependencies,
12987		 * push them to disk.
12988		 */
12989		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12990			bp = inodedep->id_bmsafemap->sm_buf;
12991			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
12992			if (bp == NULL)
12993				goto retry;
12994			FREE_LOCK(ump);
12995			if ((error = bwrite(bp)) != 0)
12996				break;
12997			ACQUIRE_LOCK(ump);
12998			if (dap != LIST_FIRST(diraddhdp))
12999				continue;
13000		}
13001		/*
13002		 * If the inode is still sitting in a buffer waiting
13003		 * to be written or waiting for the link count to be
13004		 * adjusted update it here to flush it to disk.
13005		 */
13006		if (dap == LIST_FIRST(diraddhdp)) {
13007			FREE_LOCK(ump);
13008			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
13009			    FFSV_FORCEINSMQ)))
13010				break;
13011			error = ffs_update(vp, 1);
13012			vput(vp);
13013			if (error)
13014				break;
13015			ACQUIRE_LOCK(ump);
13016		}
13017		/*
13018		 * If we have failed to get rid of all the dependencies
13019		 * then something is seriously wrong.
13020		 */
13021		if (dap == LIST_FIRST(diraddhdp)) {
13022			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13023			panic("flush_pagedep_deps: failed to flush "
13024			    "inodedep %p ino %ju dap %p",
13025			    inodedep, (uintmax_t)inum, dap);
13026		}
13027	}
13028	if (error)
13029		ACQUIRE_LOCK(ump);
13030	while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13031		LIST_REMOVE(dap, da_pdlist);
13032		LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13033	}
13034	return (error);
13035}
13036
13037/*
13038 * A large burst of file addition or deletion activity can drive the
13039 * memory load excessively high. First attempt to slow things down
13040 * using the techniques below. If that fails, this routine requests
13041 * the offending operations to fall back to running synchronously
13042 * until the memory load returns to a reasonable level.
13043 */
13044int
13045softdep_slowdown(vp)
13046	struct vnode *vp;
13047{
13048	struct ufsmount *ump;
13049	int jlow;
13050	int max_softdeps_hard;
13051
13052	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13053	    ("softdep_slowdown called on non-softdep filesystem"));
13054	ump = VFSTOUFS(vp->v_mount);
13055	ACQUIRE_LOCK(ump);
13056	jlow = 0;
13057	/*
13058	 * Check for journal space if needed.
13059	 */
13060	if (DOINGSUJ(vp)) {
13061		if (journal_space(ump, 0) == 0)
13062			jlow = 1;
13063	}
13064	/*
13065	 * If the system is under its limits and our filesystem is
13066	 * not responsible for more than our share of the usage and
13067	 * we are not low on journal space, then no need to slow down.
13068	 */
13069	max_softdeps_hard = max_softdeps * 11 / 10;
13070	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13071	    dep_current[D_INODEDEP] < max_softdeps_hard &&
13072	    dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13073	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13074	    ump->softdep_curdeps[D_DIRREM] <
13075	    (max_softdeps_hard / 2) / stat_flush_threads &&
13076	    ump->softdep_curdeps[D_INODEDEP] <
13077	    max_softdeps_hard / stat_flush_threads &&
13078	    ump->softdep_curdeps[D_INDIRDEP] <
13079	    (max_softdeps_hard / 1000) / stat_flush_threads &&
13080	    ump->softdep_curdeps[D_FREEBLKS] <
13081	    max_softdeps_hard / stat_flush_threads) {
13082		FREE_LOCK(ump);
13083  		return (0);
13084	}
13085	/*
13086	 * If the journal is low or our filesystem is over its limit
13087	 * then speedup the cleanup.
13088	 */
13089	if (ump->softdep_curdeps[D_INDIRDEP] <
13090	    (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13091		softdep_speedup(ump);
13092	stat_sync_limit_hit += 1;
13093	FREE_LOCK(ump);
13094	/*
13095	 * We only slow down the rate at which new dependencies are
13096	 * generated if we are not using journaling. With journaling,
13097	 * the cleanup should always be sufficient to keep things
13098	 * under control.
13099	 */
13100	if (DOINGSUJ(vp))
13101		return (0);
13102	return (1);
13103}
13104
13105/*
13106 * Called by the allocation routines when they are about to fail
13107 * in the hope that we can free up the requested resource (inodes
13108 * or disk space).
13109 *
13110 * First check to see if the work list has anything on it. If it has,
13111 * clean up entries until we successfully free the requested resource.
13112 * Because this process holds inodes locked, we cannot handle any remove
13113 * requests that might block on a locked inode as that could lead to
13114 * deadlock. If the worklist yields none of the requested resource,
13115 * start syncing out vnodes to free up the needed space.
13116 */
13117int
13118softdep_request_cleanup(fs, vp, cred, resource)
13119	struct fs *fs;
13120	struct vnode *vp;
13121	struct ucred *cred;
13122	int resource;
13123{
13124	struct ufsmount *ump;
13125	struct mount *mp;
13126	struct vnode *lvp, *mvp;
13127	long starttime;
13128	ufs2_daddr_t needed;
13129	int error;
13130
13131	/*
13132	 * If we are being called because of a process doing a
13133	 * copy-on-write, then it is not safe to process any
13134	 * worklist items as we will recurse into the copyonwrite
13135	 * routine.  This will result in an incoherent snapshot.
13136	 * If the vnode that we hold is a snapshot, we must avoid
13137	 * handling other resources that could cause deadlock.
13138	 */
13139	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13140		return (0);
13141
13142	if (resource == FLUSH_BLOCKS_WAIT)
13143		stat_cleanup_blkrequests += 1;
13144	else
13145		stat_cleanup_inorequests += 1;
13146
13147	mp = vp->v_mount;
13148	ump = VFSTOUFS(mp);
13149	mtx_assert(UFS_MTX(ump), MA_OWNED);
13150	UFS_UNLOCK(ump);
13151	error = ffs_update(vp, 1);
13152	if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13153		UFS_LOCK(ump);
13154		return (0);
13155	}
13156	/*
13157	 * If we are in need of resources, start by cleaning up
13158	 * any block removals associated with our inode.
13159	 */
13160	ACQUIRE_LOCK(ump);
13161	process_removes(vp);
13162	process_truncates(vp);
13163	FREE_LOCK(ump);
13164	/*
13165	 * Now clean up at least as many resources as we will need.
13166	 *
13167	 * When requested to clean up inodes, the number that are needed
13168	 * is set by the number of simultaneous writers (mnt_writeopcount)
13169	 * plus a bit of slop (2) in case some more writers show up while
13170	 * we are cleaning.
13171	 *
13172	 * When requested to free up space, the amount of space that
13173	 * we need is enough blocks to allocate a full-sized segment
13174	 * (fs_contigsumsize). The number of such segments that will
13175	 * be needed is set by the number of simultaneous writers
13176	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13177	 * writers show up while we are cleaning.
13178	 *
13179	 * Additionally, if we are unpriviledged and allocating space,
13180	 * we need to ensure that we clean up enough blocks to get the
13181	 * needed number of blocks over the threshhold of the minimum
13182	 * number of blocks required to be kept free by the filesystem
13183	 * (fs_minfree).
13184	 */
13185	if (resource == FLUSH_INODES_WAIT) {
13186		needed = vp->v_mount->mnt_writeopcount + 2;
13187	} else if (resource == FLUSH_BLOCKS_WAIT) {
13188		needed = (vp->v_mount->mnt_writeopcount + 2) *
13189		    fs->fs_contigsumsize;
13190		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
13191			needed += fragstoblks(fs,
13192			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13193			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
13194	} else {
13195		UFS_LOCK(ump);
13196		printf("softdep_request_cleanup: Unknown resource type %d\n",
13197		    resource);
13198		return (0);
13199	}
13200	starttime = time_second;
13201retry:
13202	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13203	    fs->fs_cstotal.cs_nbfree <= needed) ||
13204	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13205	    fs->fs_cstotal.cs_nifree <= needed)) {
13206		ACQUIRE_LOCK(ump);
13207		if (ump->softdep_on_worklist > 0 &&
13208		    process_worklist_item(UFSTOVFS(ump),
13209		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
13210			stat_worklist_push += 1;
13211		FREE_LOCK(ump);
13212	}
13213	/*
13214	 * If we still need resources and there are no more worklist
13215	 * entries to process to obtain them, we have to start flushing
13216	 * the dirty vnodes to force the release of additional requests
13217	 * to the worklist that we can then process to reap addition
13218	 * resources. We walk the vnodes associated with the mount point
13219	 * until we get the needed worklist requests that we can reap.
13220	 */
13221	if ((resource == FLUSH_BLOCKS_WAIT &&
13222	     fs->fs_cstotal.cs_nbfree <= needed) ||
13223	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13224	     fs->fs_cstotal.cs_nifree <= needed)) {
13225		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13226			if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
13227				VI_UNLOCK(lvp);
13228				continue;
13229			}
13230			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
13231			    curthread))
13232				continue;
13233			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
13234				vput(lvp);
13235				continue;
13236			}
13237			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13238			vput(lvp);
13239		}
13240		lvp = ump->um_devvp;
13241		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13242			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
13243			VOP_UNLOCK(lvp, 0);
13244		}
13245		if (ump->softdep_on_worklist > 0) {
13246			stat_cleanup_retries += 1;
13247			goto retry;
13248		}
13249		stat_cleanup_failures += 1;
13250	}
13251	if (time_second - starttime > stat_cleanup_high_delay)
13252		stat_cleanup_high_delay = time_second - starttime;
13253	UFS_LOCK(ump);
13254	return (1);
13255}
13256
13257static bool
13258softdep_excess_items(struct ufsmount *ump, int item)
13259{
13260
13261	KASSERT(item >= 0 && item < D_LAST, ("item %d", item));
13262	return (dep_current[item] > max_softdeps &&
13263	    ump->softdep_curdeps[item] > max_softdeps /
13264	    stat_flush_threads);
13265}
13266
13267static void
13268schedule_cleanup(struct mount *mp)
13269{
13270	struct ufsmount *ump;
13271	struct thread *td;
13272
13273	ump = VFSTOUFS(mp);
13274	LOCK_OWNED(ump);
13275	FREE_LOCK(ump);
13276	td = curthread;
13277	if ((td->td_pflags & TDP_KTHREAD) != 0 &&
13278	    (td->td_proc->p_flag2 & P2_AST_SU) == 0) {
13279		/*
13280		 * No ast is delivered to kernel threads, so nobody
13281		 * would deref the mp.  Some kernel threads
13282		 * explicitely check for AST, e.g. NFS daemon does
13283		 * this in the serving loop.
13284		 */
13285		return;
13286	}
13287	if (td->td_su != NULL)
13288		vfs_rel(td->td_su);
13289	vfs_ref(mp);
13290	td->td_su = mp;
13291	thread_lock(td);
13292	td->td_flags |= TDF_ASTPENDING;
13293	thread_unlock(td);
13294}
13295
13296static void
13297softdep_ast_cleanup_proc(void)
13298{
13299	struct thread *td;
13300	struct mount *mp;
13301	struct ufsmount *ump;
13302	int error;
13303	bool req;
13304
13305	td = curthread;
13306	while ((mp = td->td_su) != NULL) {
13307		td->td_su = NULL;
13308		error = vfs_busy(mp, MBF_NOWAIT);
13309		vfs_rel(mp);
13310		if (error != 0)
13311			return;
13312		if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) {
13313			ump = VFSTOUFS(mp);
13314			for (;;) {
13315				req = false;
13316				ACQUIRE_LOCK(ump);
13317				if (softdep_excess_items(ump, D_INODEDEP)) {
13318					req = true;
13319					request_cleanup(mp, FLUSH_INODES);
13320				}
13321				if (softdep_excess_items(ump, D_DIRREM)) {
13322					req = true;
13323					request_cleanup(mp, FLUSH_BLOCKS);
13324				}
13325				FREE_LOCK(ump);
13326				if (softdep_excess_items(ump, D_NEWBLK) ||
13327				    softdep_excess_items(ump, D_ALLOCDIRECT) ||
13328				    softdep_excess_items(ump, D_ALLOCINDIR)) {
13329					error = vn_start_write(NULL, &mp,
13330					    V_WAIT);
13331					if (error == 0) {
13332						req = true;
13333						VFS_SYNC(mp, MNT_WAIT);
13334						vn_finished_write(mp);
13335					}
13336				}
13337				if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
13338					break;
13339			}
13340		}
13341		vfs_unbusy(mp);
13342	}
13343}
13344
13345/*
13346 * If memory utilization has gotten too high, deliberately slow things
13347 * down and speed up the I/O processing.
13348 */
13349static int
13350request_cleanup(mp, resource)
13351	struct mount *mp;
13352	int resource;
13353{
13354	struct thread *td = curthread;
13355	struct ufsmount *ump;
13356
13357	ump = VFSTOUFS(mp);
13358	LOCK_OWNED(ump);
13359	/*
13360	 * We never hold up the filesystem syncer or buf daemon.
13361	 */
13362	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
13363		return (0);
13364	/*
13365	 * First check to see if the work list has gotten backlogged.
13366	 * If it has, co-opt this process to help clean up two entries.
13367	 * Because this process may hold inodes locked, we cannot
13368	 * handle any remove requests that might block on a locked
13369	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
13370	 * to avoid recursively processing the worklist.
13371	 */
13372	if (ump->softdep_on_worklist > max_softdeps / 10) {
13373		td->td_pflags |= TDP_SOFTDEP;
13374		process_worklist_item(mp, 2, LK_NOWAIT);
13375		td->td_pflags &= ~TDP_SOFTDEP;
13376		stat_worklist_push += 2;
13377		return(1);
13378	}
13379	/*
13380	 * Next, we attempt to speed up the syncer process. If that
13381	 * is successful, then we allow the process to continue.
13382	 */
13383	if (softdep_speedup(ump) &&
13384	    resource != FLUSH_BLOCKS_WAIT &&
13385	    resource != FLUSH_INODES_WAIT)
13386		return(0);
13387	/*
13388	 * If we are resource constrained on inode dependencies, try
13389	 * flushing some dirty inodes. Otherwise, we are constrained
13390	 * by file deletions, so try accelerating flushes of directories
13391	 * with removal dependencies. We would like to do the cleanup
13392	 * here, but we probably hold an inode locked at this point and
13393	 * that might deadlock against one that we try to clean. So,
13394	 * the best that we can do is request the syncer daemon to do
13395	 * the cleanup for us.
13396	 */
13397	switch (resource) {
13398
13399	case FLUSH_INODES:
13400	case FLUSH_INODES_WAIT:
13401		ACQUIRE_GBLLOCK(&lk);
13402		stat_ino_limit_push += 1;
13403		req_clear_inodedeps += 1;
13404		FREE_GBLLOCK(&lk);
13405		stat_countp = &stat_ino_limit_hit;
13406		break;
13407
13408	case FLUSH_BLOCKS:
13409	case FLUSH_BLOCKS_WAIT:
13410		ACQUIRE_GBLLOCK(&lk);
13411		stat_blk_limit_push += 1;
13412		req_clear_remove += 1;
13413		FREE_GBLLOCK(&lk);
13414		stat_countp = &stat_blk_limit_hit;
13415		break;
13416
13417	default:
13418		panic("request_cleanup: unknown type");
13419	}
13420	/*
13421	 * Hopefully the syncer daemon will catch up and awaken us.
13422	 * We wait at most tickdelay before proceeding in any case.
13423	 */
13424	ACQUIRE_GBLLOCK(&lk);
13425	FREE_LOCK(ump);
13426	proc_waiting += 1;
13427	if (callout_pending(&softdep_callout) == FALSE)
13428		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13429		    pause_timer, 0);
13430
13431	if ((td->td_pflags & TDP_KTHREAD) == 0)
13432		msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13433	proc_waiting -= 1;
13434	FREE_GBLLOCK(&lk);
13435	ACQUIRE_LOCK(ump);
13436	return (1);
13437}
13438
13439/*
13440 * Awaken processes pausing in request_cleanup and clear proc_waiting
13441 * to indicate that there is no longer a timer running. Pause_timer
13442 * will be called with the global softdep mutex (&lk) locked.
13443 */
13444static void
13445pause_timer(arg)
13446	void *arg;
13447{
13448
13449	GBLLOCK_OWNED(&lk);
13450	/*
13451	 * The callout_ API has acquired mtx and will hold it around this
13452	 * function call.
13453	 */
13454	*stat_countp += proc_waiting;
13455	wakeup(&proc_waiting);
13456}
13457
13458/*
13459 * If requested, try removing inode or removal dependencies.
13460 */
13461static void
13462check_clear_deps(mp)
13463	struct mount *mp;
13464{
13465
13466	/*
13467	 * If we are suspended, it may be because of our using
13468	 * too many inodedeps, so help clear them out.
13469	 */
13470	if (MOUNTEDSUJ(mp) && VFSTOUFS(mp)->softdep_jblocks->jb_suspended)
13471		clear_inodedeps(mp);
13472	/*
13473	 * General requests for cleanup of backed up dependencies
13474	 */
13475	ACQUIRE_GBLLOCK(&lk);
13476	if (req_clear_inodedeps) {
13477		req_clear_inodedeps -= 1;
13478		FREE_GBLLOCK(&lk);
13479		clear_inodedeps(mp);
13480		ACQUIRE_GBLLOCK(&lk);
13481		wakeup(&proc_waiting);
13482	}
13483	if (req_clear_remove) {
13484		req_clear_remove -= 1;
13485		FREE_GBLLOCK(&lk);
13486		clear_remove(mp);
13487		ACQUIRE_GBLLOCK(&lk);
13488		wakeup(&proc_waiting);
13489	}
13490	FREE_GBLLOCK(&lk);
13491}
13492
13493/*
13494 * Flush out a directory with at least one removal dependency in an effort to
13495 * reduce the number of dirrem, freefile, and freeblks dependency structures.
13496 */
13497static void
13498clear_remove(mp)
13499	struct mount *mp;
13500{
13501	struct pagedep_hashhead *pagedephd;
13502	struct pagedep *pagedep;
13503	struct ufsmount *ump;
13504	struct vnode *vp;
13505	struct bufobj *bo;
13506	int error, cnt;
13507	ino_t ino;
13508
13509	ump = VFSTOUFS(mp);
13510	LOCK_OWNED(ump);
13511
13512	for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) {
13513		pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++];
13514		if (ump->pagedep_nextclean > ump->pagedep_hash_size)
13515			ump->pagedep_nextclean = 0;
13516		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
13517			if (LIST_EMPTY(&pagedep->pd_dirremhd))
13518				continue;
13519			ino = pagedep->pd_ino;
13520			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13521				continue;
13522			FREE_LOCK(ump);
13523
13524			/*
13525			 * Let unmount clear deps
13526			 */
13527			error = vfs_busy(mp, MBF_NOWAIT);
13528			if (error != 0)
13529				goto finish_write;
13530			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13531			     FFSV_FORCEINSMQ);
13532			vfs_unbusy(mp);
13533			if (error != 0) {
13534				softdep_error("clear_remove: vget", error);
13535				goto finish_write;
13536			}
13537			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13538				softdep_error("clear_remove: fsync", error);
13539			bo = &vp->v_bufobj;
13540			BO_LOCK(bo);
13541			drain_output(vp);
13542			BO_UNLOCK(bo);
13543			vput(vp);
13544		finish_write:
13545			vn_finished_write(mp);
13546			ACQUIRE_LOCK(ump);
13547			return;
13548		}
13549	}
13550}
13551
13552/*
13553 * Clear out a block of dirty inodes in an effort to reduce
13554 * the number of inodedep dependency structures.
13555 */
13556static void
13557clear_inodedeps(mp)
13558	struct mount *mp;
13559{
13560	struct inodedep_hashhead *inodedephd;
13561	struct inodedep *inodedep;
13562	struct ufsmount *ump;
13563	struct vnode *vp;
13564	struct fs *fs;
13565	int error, cnt;
13566	ino_t firstino, lastino, ino;
13567
13568	ump = VFSTOUFS(mp);
13569	fs = ump->um_fs;
13570	LOCK_OWNED(ump);
13571	/*
13572	 * Pick a random inode dependency to be cleared.
13573	 * We will then gather up all the inodes in its block
13574	 * that have dependencies and flush them out.
13575	 */
13576	for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) {
13577		inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++];
13578		if (ump->inodedep_nextclean > ump->inodedep_hash_size)
13579			ump->inodedep_nextclean = 0;
13580		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
13581			break;
13582	}
13583	if (inodedep == NULL)
13584		return;
13585	/*
13586	 * Find the last inode in the block with dependencies.
13587	 */
13588	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
13589	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
13590		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
13591			break;
13592	/*
13593	 * Asynchronously push all but the last inode with dependencies.
13594	 * Synchronously push the last inode with dependencies to ensure
13595	 * that the inode block gets written to free up the inodedeps.
13596	 */
13597	for (ino = firstino; ino <= lastino; ino++) {
13598		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13599			continue;
13600		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13601			continue;
13602		FREE_LOCK(ump);
13603		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
13604		if (error != 0) {
13605			vn_finished_write(mp);
13606			ACQUIRE_LOCK(ump);
13607			return;
13608		}
13609		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13610		    FFSV_FORCEINSMQ)) != 0) {
13611			softdep_error("clear_inodedeps: vget", error);
13612			vfs_unbusy(mp);
13613			vn_finished_write(mp);
13614			ACQUIRE_LOCK(ump);
13615			return;
13616		}
13617		vfs_unbusy(mp);
13618		if (ino == lastino) {
13619			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
13620				softdep_error("clear_inodedeps: fsync1", error);
13621		} else {
13622			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13623				softdep_error("clear_inodedeps: fsync2", error);
13624			BO_LOCK(&vp->v_bufobj);
13625			drain_output(vp);
13626			BO_UNLOCK(&vp->v_bufobj);
13627		}
13628		vput(vp);
13629		vn_finished_write(mp);
13630		ACQUIRE_LOCK(ump);
13631	}
13632}
13633
13634void
13635softdep_buf_append(bp, wkhd)
13636	struct buf *bp;
13637	struct workhead *wkhd;
13638{
13639	struct worklist *wk;
13640	struct ufsmount *ump;
13641
13642	if ((wk = LIST_FIRST(wkhd)) == NULL)
13643		return;
13644	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13645	    ("softdep_buf_append called on non-softdep filesystem"));
13646	ump = VFSTOUFS(wk->wk_mp);
13647	ACQUIRE_LOCK(ump);
13648	while ((wk = LIST_FIRST(wkhd)) != NULL) {
13649		WORKLIST_REMOVE(wk);
13650		WORKLIST_INSERT(&bp->b_dep, wk);
13651	}
13652	FREE_LOCK(ump);
13653
13654}
13655
13656void
13657softdep_inode_append(ip, cred, wkhd)
13658	struct inode *ip;
13659	struct ucred *cred;
13660	struct workhead *wkhd;
13661{
13662	struct buf *bp;
13663	struct fs *fs;
13664	int error;
13665
13666	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
13667	    ("softdep_inode_append called on non-softdep filesystem"));
13668	fs = ip->i_fs;
13669	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
13670	    (int)fs->fs_bsize, cred, &bp);
13671	if (error) {
13672		bqrelse(bp);
13673		softdep_freework(wkhd);
13674		return;
13675	}
13676	softdep_buf_append(bp, wkhd);
13677	bqrelse(bp);
13678}
13679
13680void
13681softdep_freework(wkhd)
13682	struct workhead *wkhd;
13683{
13684	struct worklist *wk;
13685	struct ufsmount *ump;
13686
13687	if ((wk = LIST_FIRST(wkhd)) == NULL)
13688		return;
13689	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13690	    ("softdep_freework called on non-softdep filesystem"));
13691	ump = VFSTOUFS(wk->wk_mp);
13692	ACQUIRE_LOCK(ump);
13693	handle_jwork(wkhd);
13694	FREE_LOCK(ump);
13695}
13696
13697/*
13698 * Function to determine if the buffer has outstanding dependencies
13699 * that will cause a roll-back if the buffer is written. If wantcount
13700 * is set, return number of dependencies, otherwise just yes or no.
13701 */
13702static int
13703softdep_count_dependencies(bp, wantcount)
13704	struct buf *bp;
13705	int wantcount;
13706{
13707	struct worklist *wk;
13708	struct ufsmount *ump;
13709	struct bmsafemap *bmsafemap;
13710	struct freework *freework;
13711	struct inodedep *inodedep;
13712	struct indirdep *indirdep;
13713	struct freeblks *freeblks;
13714	struct allocindir *aip;
13715	struct pagedep *pagedep;
13716	struct dirrem *dirrem;
13717	struct newblk *newblk;
13718	struct mkdir *mkdir;
13719	struct diradd *dap;
13720	int i, retval;
13721
13722	retval = 0;
13723	if ((wk = LIST_FIRST(&bp->b_dep)) == NULL)
13724		return (0);
13725	ump = VFSTOUFS(wk->wk_mp);
13726	ACQUIRE_LOCK(ump);
13727	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
13728		switch (wk->wk_type) {
13729
13730		case D_INODEDEP:
13731			inodedep = WK_INODEDEP(wk);
13732			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
13733				/* bitmap allocation dependency */
13734				retval += 1;
13735				if (!wantcount)
13736					goto out;
13737			}
13738			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
13739				/* direct block pointer dependency */
13740				retval += 1;
13741				if (!wantcount)
13742					goto out;
13743			}
13744			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
13745				/* direct block pointer dependency */
13746				retval += 1;
13747				if (!wantcount)
13748					goto out;
13749			}
13750			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
13751				/* Add reference dependency. */
13752				retval += 1;
13753				if (!wantcount)
13754					goto out;
13755			}
13756			continue;
13757
13758		case D_INDIRDEP:
13759			indirdep = WK_INDIRDEP(wk);
13760
13761			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
13762				/* indirect truncation dependency */
13763				retval += 1;
13764				if (!wantcount)
13765					goto out;
13766			}
13767
13768			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13769				/* indirect block pointer dependency */
13770				retval += 1;
13771				if (!wantcount)
13772					goto out;
13773			}
13774			continue;
13775
13776		case D_PAGEDEP:
13777			pagedep = WK_PAGEDEP(wk);
13778			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
13779				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
13780					/* Journal remove ref dependency. */
13781					retval += 1;
13782					if (!wantcount)
13783						goto out;
13784				}
13785			}
13786			for (i = 0; i < DAHASHSZ; i++) {
13787
13788				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
13789					/* directory entry dependency */
13790					retval += 1;
13791					if (!wantcount)
13792						goto out;
13793				}
13794			}
13795			continue;
13796
13797		case D_BMSAFEMAP:
13798			bmsafemap = WK_BMSAFEMAP(wk);
13799			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
13800				/* Add reference dependency. */
13801				retval += 1;
13802				if (!wantcount)
13803					goto out;
13804			}
13805			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
13806				/* Allocate block dependency. */
13807				retval += 1;
13808				if (!wantcount)
13809					goto out;
13810			}
13811			continue;
13812
13813		case D_FREEBLKS:
13814			freeblks = WK_FREEBLKS(wk);
13815			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
13816				/* Freeblk journal dependency. */
13817				retval += 1;
13818				if (!wantcount)
13819					goto out;
13820			}
13821			continue;
13822
13823		case D_ALLOCDIRECT:
13824		case D_ALLOCINDIR:
13825			newblk = WK_NEWBLK(wk);
13826			if (newblk->nb_jnewblk) {
13827				/* Journal allocate dependency. */
13828				retval += 1;
13829				if (!wantcount)
13830					goto out;
13831			}
13832			continue;
13833
13834		case D_MKDIR:
13835			mkdir = WK_MKDIR(wk);
13836			if (mkdir->md_jaddref) {
13837				/* Journal reference dependency. */
13838				retval += 1;
13839				if (!wantcount)
13840					goto out;
13841			}
13842			continue;
13843
13844		case D_FREEWORK:
13845		case D_FREEDEP:
13846		case D_JSEGDEP:
13847		case D_JSEG:
13848		case D_SBDEP:
13849			/* never a dependency on these blocks */
13850			continue;
13851
13852		default:
13853			panic("softdep_count_dependencies: Unexpected type %s",
13854			    TYPENAME(wk->wk_type));
13855			/* NOTREACHED */
13856		}
13857	}
13858out:
13859	FREE_LOCK(ump);
13860	return retval;
13861}
13862
13863/*
13864 * Acquire exclusive access to a buffer.
13865 * Must be called with a locked mtx parameter.
13866 * Return acquired buffer or NULL on failure.
13867 */
13868static struct buf *
13869getdirtybuf(bp, lock, waitfor)
13870	struct buf *bp;
13871	struct rwlock *lock;
13872	int waitfor;
13873{
13874	int error;
13875
13876	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13877		if (waitfor != MNT_WAIT)
13878			return (NULL);
13879		error = BUF_LOCK(bp,
13880		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
13881		/*
13882		 * Even if we sucessfully acquire bp here, we have dropped
13883		 * lock, which may violates our guarantee.
13884		 */
13885		if (error == 0)
13886			BUF_UNLOCK(bp);
13887		else if (error != ENOLCK)
13888			panic("getdirtybuf: inconsistent lock: %d", error);
13889		rw_wlock(lock);
13890		return (NULL);
13891	}
13892	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13893		if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
13894			rw_wunlock(lock);
13895			BO_LOCK(bp->b_bufobj);
13896			BUF_UNLOCK(bp);
13897			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13898				bp->b_vflags |= BV_BKGRDWAIT;
13899				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
13900				       PRIBIO | PDROP, "getbuf", 0);
13901			} else
13902				BO_UNLOCK(bp->b_bufobj);
13903			rw_wlock(lock);
13904			return (NULL);
13905		}
13906		BUF_UNLOCK(bp);
13907		if (waitfor != MNT_WAIT)
13908			return (NULL);
13909		/*
13910		 * The lock argument must be bp->b_vp's mutex in
13911		 * this case.
13912		 */
13913#ifdef	DEBUG_VFS_LOCKS
13914		if (bp->b_vp->v_type != VCHR)
13915			ASSERT_BO_WLOCKED(bp->b_bufobj);
13916#endif
13917		bp->b_vflags |= BV_BKGRDWAIT;
13918		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
13919		return (NULL);
13920	}
13921	if ((bp->b_flags & B_DELWRI) == 0) {
13922		BUF_UNLOCK(bp);
13923		return (NULL);
13924	}
13925	bremfree(bp);
13926	return (bp);
13927}
13928
13929
13930/*
13931 * Check if it is safe to suspend the file system now.  On entry,
13932 * the vnode interlock for devvp should be held.  Return 0 with
13933 * the mount interlock held if the file system can be suspended now,
13934 * otherwise return EAGAIN with the mount interlock held.
13935 */
13936int
13937softdep_check_suspend(struct mount *mp,
13938		      struct vnode *devvp,
13939		      int softdep_depcnt,
13940		      int softdep_accdepcnt,
13941		      int secondary_writes,
13942		      int secondary_accwrites)
13943{
13944	struct bufobj *bo;
13945	struct ufsmount *ump;
13946	struct inodedep *inodedep;
13947	int error, unlinked;
13948
13949	bo = &devvp->v_bufobj;
13950	ASSERT_BO_WLOCKED(bo);
13951
13952	/*
13953	 * If we are not running with soft updates, then we need only
13954	 * deal with secondary writes as we try to suspend.
13955	 */
13956	if (MOUNTEDSOFTDEP(mp) == 0) {
13957		MNT_ILOCK(mp);
13958		while (mp->mnt_secondary_writes != 0) {
13959			BO_UNLOCK(bo);
13960			msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
13961			    (PUSER - 1) | PDROP, "secwr", 0);
13962			BO_LOCK(bo);
13963			MNT_ILOCK(mp);
13964		}
13965
13966		/*
13967		 * Reasons for needing more work before suspend:
13968		 * - Dirty buffers on devvp.
13969		 * - Secondary writes occurred after start of vnode sync loop
13970		 */
13971		error = 0;
13972		if (bo->bo_numoutput > 0 ||
13973		    bo->bo_dirty.bv_cnt > 0 ||
13974		    secondary_writes != 0 ||
13975		    mp->mnt_secondary_writes != 0 ||
13976		    secondary_accwrites != mp->mnt_secondary_accwrites)
13977			error = EAGAIN;
13978		BO_UNLOCK(bo);
13979		return (error);
13980	}
13981
13982	/*
13983	 * If we are running with soft updates, then we need to coordinate
13984	 * with them as we try to suspend.
13985	 */
13986	ump = VFSTOUFS(mp);
13987	for (;;) {
13988		if (!TRY_ACQUIRE_LOCK(ump)) {
13989			BO_UNLOCK(bo);
13990			ACQUIRE_LOCK(ump);
13991			FREE_LOCK(ump);
13992			BO_LOCK(bo);
13993			continue;
13994		}
13995		MNT_ILOCK(mp);
13996		if (mp->mnt_secondary_writes != 0) {
13997			FREE_LOCK(ump);
13998			BO_UNLOCK(bo);
13999			msleep(&mp->mnt_secondary_writes,
14000			       MNT_MTX(mp),
14001			       (PUSER - 1) | PDROP, "secwr", 0);
14002			BO_LOCK(bo);
14003			continue;
14004		}
14005		break;
14006	}
14007
14008	unlinked = 0;
14009	if (MOUNTEDSUJ(mp)) {
14010		for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked);
14011		    inodedep != NULL;
14012		    inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
14013			if ((inodedep->id_state & (UNLINKED | UNLINKLINKS |
14014			    UNLINKONLIST)) != (UNLINKED | UNLINKLINKS |
14015			    UNLINKONLIST) ||
14016			    !check_inodedep_free(inodedep))
14017				continue;
14018			unlinked++;
14019		}
14020	}
14021
14022	/*
14023	 * Reasons for needing more work before suspend:
14024	 * - Dirty buffers on devvp.
14025	 * - Softdep activity occurred after start of vnode sync loop
14026	 * - Secondary writes occurred after start of vnode sync loop
14027	 */
14028	error = 0;
14029	if (bo->bo_numoutput > 0 ||
14030	    bo->bo_dirty.bv_cnt > 0 ||
14031	    softdep_depcnt != unlinked ||
14032	    ump->softdep_deps != unlinked ||
14033	    softdep_accdepcnt != ump->softdep_accdeps ||
14034	    secondary_writes != 0 ||
14035	    mp->mnt_secondary_writes != 0 ||
14036	    secondary_accwrites != mp->mnt_secondary_accwrites)
14037		error = EAGAIN;
14038	FREE_LOCK(ump);
14039	BO_UNLOCK(bo);
14040	return (error);
14041}
14042
14043
14044/*
14045 * Get the number of dependency structures for the file system, both
14046 * the current number and the total number allocated.  These will
14047 * later be used to detect that softdep processing has occurred.
14048 */
14049void
14050softdep_get_depcounts(struct mount *mp,
14051		      int *softdep_depsp,
14052		      int *softdep_accdepsp)
14053{
14054	struct ufsmount *ump;
14055
14056	if (MOUNTEDSOFTDEP(mp) == 0) {
14057		*softdep_depsp = 0;
14058		*softdep_accdepsp = 0;
14059		return;
14060	}
14061	ump = VFSTOUFS(mp);
14062	ACQUIRE_LOCK(ump);
14063	*softdep_depsp = ump->softdep_deps;
14064	*softdep_accdepsp = ump->softdep_accdeps;
14065	FREE_LOCK(ump);
14066}
14067
14068/*
14069 * Wait for pending output on a vnode to complete.
14070 * Must be called with vnode lock and interlock locked.
14071 *
14072 * XXX: Should just be a call to bufobj_wwait().
14073 */
14074static void
14075drain_output(vp)
14076	struct vnode *vp;
14077{
14078	struct bufobj *bo;
14079
14080	bo = &vp->v_bufobj;
14081	ASSERT_VOP_LOCKED(vp, "drain_output");
14082	ASSERT_BO_WLOCKED(bo);
14083
14084	while (bo->bo_numoutput) {
14085		bo->bo_flag |= BO_WWAIT;
14086		msleep((caddr_t)&bo->bo_numoutput,
14087		    BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0);
14088	}
14089}
14090
14091/*
14092 * Called whenever a buffer that is being invalidated or reallocated
14093 * contains dependencies. This should only happen if an I/O error has
14094 * occurred. The routine is called with the buffer locked.
14095 */
14096static void
14097softdep_deallocate_dependencies(bp)
14098	struct buf *bp;
14099{
14100
14101	if ((bp->b_ioflags & BIO_ERROR) == 0)
14102		panic("softdep_deallocate_dependencies: dangling deps");
14103	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
14104		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
14105	else
14106		printf("softdep_deallocate_dependencies: "
14107		    "got error %d while accessing filesystem\n", bp->b_error);
14108	if (bp->b_error != ENXIO)
14109		panic("softdep_deallocate_dependencies: unrecovered I/O error");
14110}
14111
14112/*
14113 * Function to handle asynchronous write errors in the filesystem.
14114 */
14115static void
14116softdep_error(func, error)
14117	char *func;
14118	int error;
14119{
14120
14121	/* XXX should do something better! */
14122	printf("%s: got error %d while accessing filesystem\n", func, error);
14123}
14124
14125#ifdef DDB
14126
14127static void
14128inodedep_print(struct inodedep *inodedep, int verbose)
14129{
14130	db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
14131	    " saveino %p\n",
14132	    inodedep, inodedep->id_fs, inodedep->id_state,
14133	    (intmax_t)inodedep->id_ino,
14134	    (intmax_t)fsbtodb(inodedep->id_fs,
14135	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
14136	    inodedep->id_nlinkdelta, inodedep->id_savednlink,
14137	    inodedep->id_savedino1);
14138
14139	if (verbose == 0)
14140		return;
14141
14142	db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
14143	    "mkdiradd %p\n",
14144	    LIST_FIRST(&inodedep->id_pendinghd),
14145	    LIST_FIRST(&inodedep->id_bufwait),
14146	    LIST_FIRST(&inodedep->id_inowait),
14147	    TAILQ_FIRST(&inodedep->id_inoreflst),
14148	    inodedep->id_mkdiradd);
14149	db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
14150	    TAILQ_FIRST(&inodedep->id_inoupdt),
14151	    TAILQ_FIRST(&inodedep->id_newinoupdt),
14152	    TAILQ_FIRST(&inodedep->id_extupdt),
14153	    TAILQ_FIRST(&inodedep->id_newextupdt));
14154}
14155
14156DB_SHOW_COMMAND(inodedep, db_show_inodedep)
14157{
14158
14159	if (have_addr == 0) {
14160		db_printf("Address required\n");
14161		return;
14162	}
14163	inodedep_print((struct inodedep*)addr, 1);
14164}
14165
14166DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
14167{
14168	struct inodedep_hashhead *inodedephd;
14169	struct inodedep *inodedep;
14170	struct ufsmount *ump;
14171	int cnt;
14172
14173	if (have_addr == 0) {
14174		db_printf("Address required\n");
14175		return;
14176	}
14177	ump = (struct ufsmount *)addr;
14178	for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) {
14179		inodedephd = &ump->inodedep_hashtbl[cnt];
14180		LIST_FOREACH(inodedep, inodedephd, id_hash) {
14181			inodedep_print(inodedep, 0);
14182		}
14183	}
14184}
14185
14186DB_SHOW_COMMAND(worklist, db_show_worklist)
14187{
14188	struct worklist *wk;
14189
14190	if (have_addr == 0) {
14191		db_printf("Address required\n");
14192		return;
14193	}
14194	wk = (struct worklist *)addr;
14195	printf("worklist: %p type %s state 0x%X\n",
14196	    wk, TYPENAME(wk->wk_type), wk->wk_state);
14197}
14198
14199DB_SHOW_COMMAND(workhead, db_show_workhead)
14200{
14201	struct workhead *wkhd;
14202	struct worklist *wk;
14203	int i;
14204
14205	if (have_addr == 0) {
14206		db_printf("Address required\n");
14207		return;
14208	}
14209	wkhd = (struct workhead *)addr;
14210	wk = LIST_FIRST(wkhd);
14211	for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
14212		db_printf("worklist: %p type %s state 0x%X",
14213		    wk, TYPENAME(wk->wk_type), wk->wk_state);
14214	if (i == 100)
14215		db_printf("workhead overflow");
14216	printf("\n");
14217}
14218
14219
14220DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
14221{
14222	struct mkdirlist *mkdirlisthd;
14223	struct jaddref *jaddref;
14224	struct diradd *diradd;
14225	struct mkdir *mkdir;
14226
14227	if (have_addr == 0) {
14228		db_printf("Address required\n");
14229		return;
14230	}
14231	mkdirlisthd = (struct mkdirlist *)addr;
14232	LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
14233		diradd = mkdir->md_diradd;
14234		db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
14235		    mkdir, mkdir->md_state, diradd, diradd->da_state);
14236		if ((jaddref = mkdir->md_jaddref) != NULL)
14237			db_printf(" jaddref %p jaddref state 0x%X",
14238			    jaddref, jaddref->ja_state);
14239		db_printf("\n");
14240	}
14241}
14242
14243/* exported to ffs_vfsops.c */
14244extern void db_print_ffs(struct ufsmount *ump);
14245void
14246db_print_ffs(struct ufsmount *ump)
14247{
14248	db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
14249	    ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
14250	    ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
14251	    ump->softdep_deps, ump->softdep_req);
14252}
14253
14254#endif /* DDB */
14255
14256#endif /* SOFTUPDATES */
14257