1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright 1998, 2000 Marshall Kirk McKusick.
5 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
6 * All rights reserved.
7 *
8 * The soft updates code is derived from the appendix of a University
9 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
10 * "Soft Updates: A Solution to the Metadata Update Problem in File
11 * Systems", CSE-TR-254-95, August 1995).
12 *
13 * Further information about soft updates can be obtained from:
14 *
15 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
16 *	1614 Oxford Street		mckusick@mckusick.com
17 *	Berkeley, CA 94709-1608		+1-510-843-9542
18 *	USA
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * 1. Redistributions of source code must retain the above copyright
25 *    notice, this list of conditions and the following disclaimer.
26 * 2. Redistributions in binary form must reproduce the above copyright
27 *    notice, this list of conditions and the following disclaimer in the
28 *    documentation and/or other materials provided with the distribution.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
31 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
33 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
34 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
35 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
36 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
37 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
38 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
39 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 */
41
42#include <sys/cdefs.h>
43#include "opt_ffs.h"
44#include "opt_quota.h"
45#include "opt_ddb.h"
46
47#include <sys/param.h>
48#include <sys/kernel.h>
49#include <sys/systm.h>
50#include <sys/bio.h>
51#include <sys/buf.h>
52#include <sys/kdb.h>
53#include <sys/kthread.h>
54#include <sys/ktr.h>
55#include <sys/limits.h>
56#include <sys/lock.h>
57#include <sys/malloc.h>
58#include <sys/mount.h>
59#include <sys/mutex.h>
60#include <sys/namei.h>
61#include <sys/priv.h>
62#include <sys/proc.h>
63#include <sys/racct.h>
64#include <sys/rwlock.h>
65#include <sys/stat.h>
66#include <sys/sysctl.h>
67#include <sys/syslog.h>
68#include <sys/vnode.h>
69#include <sys/conf.h>
70
71#include <ufs/ufs/dir.h>
72#include <ufs/ufs/extattr.h>
73#include <ufs/ufs/quota.h>
74#include <ufs/ufs/inode.h>
75#include <ufs/ufs/ufsmount.h>
76#include <ufs/ffs/fs.h>
77#include <ufs/ffs/softdep.h>
78#include <ufs/ffs/ffs_extern.h>
79#include <ufs/ufs/ufs_extern.h>
80
81#include <vm/vm.h>
82#include <vm/vm_extern.h>
83#include <vm/vm_object.h>
84
85#include <geom/geom.h>
86#include <geom/geom_vfs.h>
87
88#include <ddb/ddb.h>
89
90#define	KTR_SUJ	0	/* Define to KTR_SPARE. */
91
92#ifndef SOFTUPDATES
93
94int
95softdep_flushfiles(struct mount *oldmnt,
96	int flags,
97	struct thread *td)
98{
99
100	panic("softdep_flushfiles called");
101}
102
103int
104softdep_mount(struct vnode *devvp,
105	struct mount *mp,
106	struct fs *fs,
107	struct ucred *cred)
108{
109
110	return (0);
111}
112
113void
114softdep_initialize(void)
115{
116
117	return;
118}
119
120void
121softdep_uninitialize(void)
122{
123
124	return;
125}
126
127void
128softdep_unmount(struct mount *mp)
129{
130
131	panic("softdep_unmount called");
132}
133
134void
135softdep_setup_sbupdate(struct ufsmount *ump,
136	struct fs *fs,
137	struct buf *bp)
138{
139
140	panic("softdep_setup_sbupdate called");
141}
142
143void
144softdep_setup_inomapdep(struct buf *bp,
145	struct inode *ip,
146	ino_t newinum,
147	int mode)
148{
149
150	panic("softdep_setup_inomapdep called");
151}
152
153void
154softdep_setup_blkmapdep(struct buf *bp,
155	struct mount *mp,
156	ufs2_daddr_t newblkno,
157	int frags,
158	int oldfrags)
159{
160
161	panic("softdep_setup_blkmapdep called");
162}
163
164void
165softdep_setup_allocdirect(struct inode *ip,
166	ufs_lbn_t lbn,
167	ufs2_daddr_t newblkno,
168	ufs2_daddr_t oldblkno,
169	long newsize,
170	long oldsize,
171	struct buf *bp)
172{
173
174	panic("softdep_setup_allocdirect called");
175}
176
177void
178softdep_setup_allocext(struct inode *ip,
179	ufs_lbn_t lbn,
180	ufs2_daddr_t newblkno,
181	ufs2_daddr_t oldblkno,
182	long newsize,
183	long oldsize,
184	struct buf *bp)
185{
186
187	panic("softdep_setup_allocext called");
188}
189
190void
191softdep_setup_allocindir_page(struct inode *ip,
192	ufs_lbn_t lbn,
193	struct buf *bp,
194	int ptrno,
195	ufs2_daddr_t newblkno,
196	ufs2_daddr_t oldblkno,
197	struct buf *nbp)
198{
199
200	panic("softdep_setup_allocindir_page called");
201}
202
203void
204softdep_setup_allocindir_meta(struct buf *nbp,
205	struct inode *ip,
206	struct buf *bp,
207	int ptrno,
208	ufs2_daddr_t newblkno)
209{
210
211	panic("softdep_setup_allocindir_meta called");
212}
213
214void
215softdep_journal_freeblocks(struct inode *ip,
216	struct ucred *cred,
217	off_t length,
218	int flags)
219{
220
221	panic("softdep_journal_freeblocks called");
222}
223
224void
225softdep_journal_fsync(struct inode *ip)
226{
227
228	panic("softdep_journal_fsync called");
229}
230
231void
232softdep_setup_freeblocks(struct inode *ip,
233	off_t length,
234	int flags)
235{
236
237	panic("softdep_setup_freeblocks called");
238}
239
240void
241softdep_freefile(struct vnode *pvp,
242		ino_t ino,
243		int mode)
244{
245
246	panic("softdep_freefile called");
247}
248
249int
250softdep_setup_directory_add(struct buf *bp,
251	struct inode *dp,
252	off_t diroffset,
253	ino_t newinum,
254	struct buf *newdirbp,
255	int isnewblk)
256{
257
258	panic("softdep_setup_directory_add called");
259}
260
261void
262softdep_change_directoryentry_offset(struct buf *bp,
263	struct inode *dp,
264	caddr_t base,
265	caddr_t oldloc,
266	caddr_t newloc,
267	int entrysize)
268{
269
270	panic("softdep_change_directoryentry_offset called");
271}
272
273void
274softdep_setup_remove(struct buf *bp,
275	struct inode *dp,
276	struct inode *ip,
277	int isrmdir)
278{
279
280	panic("softdep_setup_remove called");
281}
282
283void
284softdep_setup_directory_change(struct buf *bp,
285	struct inode *dp,
286	struct inode *ip,
287	ino_t newinum,
288	int isrmdir)
289{
290
291	panic("softdep_setup_directory_change called");
292}
293
294void
295softdep_setup_blkfree(struct mount *mp,
296	struct buf *bp,
297	ufs2_daddr_t blkno,
298	int frags,
299	struct workhead *wkhd,
300	bool doingrecovery)
301{
302
303	panic("%s called", __FUNCTION__);
304}
305
306void
307softdep_setup_inofree(struct mount *mp,
308	struct buf *bp,
309	ino_t ino,
310	struct workhead *wkhd,
311	bool doingrecovery)
312{
313
314	panic("%s called", __FUNCTION__);
315}
316
317void
318softdep_setup_unlink(struct inode *dp, struct inode *ip)
319{
320
321	panic("%s called", __FUNCTION__);
322}
323
324void
325softdep_setup_link(struct inode *dp, struct inode *ip)
326{
327
328	panic("%s called", __FUNCTION__);
329}
330
331void
332softdep_revert_link(struct inode *dp, struct inode *ip)
333{
334
335	panic("%s called", __FUNCTION__);
336}
337
338void
339softdep_setup_rmdir(struct inode *dp, struct inode *ip)
340{
341
342	panic("%s called", __FUNCTION__);
343}
344
345void
346softdep_revert_rmdir(struct inode *dp, struct inode *ip)
347{
348
349	panic("%s called", __FUNCTION__);
350}
351
352void
353softdep_setup_create(struct inode *dp, struct inode *ip)
354{
355
356	panic("%s called", __FUNCTION__);
357}
358
359void
360softdep_revert_create(struct inode *dp, struct inode *ip)
361{
362
363	panic("%s called", __FUNCTION__);
364}
365
366void
367softdep_setup_mkdir(struct inode *dp, struct inode *ip)
368{
369
370	panic("%s called", __FUNCTION__);
371}
372
373void
374softdep_revert_mkdir(struct inode *dp, struct inode *ip)
375{
376
377	panic("%s called", __FUNCTION__);
378}
379
380void
381softdep_setup_dotdot_link(struct inode *dp, struct inode *ip)
382{
383
384	panic("%s called", __FUNCTION__);
385}
386
387int
388softdep_prealloc(struct vnode *vp, int waitok)
389{
390
391	panic("%s called", __FUNCTION__);
392}
393
394int
395softdep_journal_lookup(struct mount *mp, struct vnode **vpp)
396{
397
398	return (ENOENT);
399}
400
401void
402softdep_change_linkcnt(struct inode *ip)
403{
404
405	panic("softdep_change_linkcnt called");
406}
407
408void
409softdep_load_inodeblock(struct inode *ip)
410{
411
412	panic("softdep_load_inodeblock called");
413}
414
415void
416softdep_update_inodeblock(struct inode *ip,
417	struct buf *bp,
418	int waitfor)
419{
420
421	panic("softdep_update_inodeblock called");
422}
423
424int
425softdep_fsync(struct vnode *vp)	/* the "in_core" copy of the inode */
426{
427
428	return (0);
429}
430
431void
432softdep_fsync_mountdev(struct vnode *vp)
433{
434
435	return;
436}
437
438int
439softdep_flushworklist(struct mount *oldmnt,
440	int *countp,
441	struct thread *td)
442{
443
444	*countp = 0;
445	return (0);
446}
447
448int
449softdep_sync_metadata(struct vnode *vp)
450{
451
452	panic("softdep_sync_metadata called");
453}
454
455int
456softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
457{
458
459	panic("softdep_sync_buf called");
460}
461
462int
463softdep_slowdown(struct vnode *vp)
464{
465
466	panic("softdep_slowdown called");
467}
468
469int
470softdep_request_cleanup(struct fs *fs,
471	struct vnode *vp,
472	struct ucred *cred,
473	int resource)
474{
475
476	return (0);
477}
478
479int
480softdep_check_suspend(struct mount *mp,
481		      struct vnode *devvp,
482		      int softdep_depcnt,
483		      int softdep_accdepcnt,
484		      int secondary_writes,
485		      int secondary_accwrites)
486{
487	struct bufobj *bo;
488	int error;
489
490	(void) softdep_depcnt,
491	(void) softdep_accdepcnt;
492
493	bo = &devvp->v_bufobj;
494	ASSERT_BO_WLOCKED(bo);
495
496	MNT_ILOCK(mp);
497	while (mp->mnt_secondary_writes != 0) {
498		BO_UNLOCK(bo);
499		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
500		    (PUSER - 1) | PDROP, "secwr", 0);
501		BO_LOCK(bo);
502		MNT_ILOCK(mp);
503	}
504
505	/*
506	 * Reasons for needing more work before suspend:
507	 * - Dirty buffers on devvp.
508	 * - Secondary writes occurred after start of vnode sync loop
509	 */
510	error = 0;
511	if (bo->bo_numoutput > 0 ||
512	    bo->bo_dirty.bv_cnt > 0 ||
513	    secondary_writes != 0 ||
514	    mp->mnt_secondary_writes != 0 ||
515	    secondary_accwrites != mp->mnt_secondary_accwrites)
516		error = EAGAIN;
517	BO_UNLOCK(bo);
518	return (error);
519}
520
521void
522softdep_get_depcounts(struct mount *mp,
523		      int *softdepactivep,
524		      int *softdepactiveaccp)
525{
526	(void) mp;
527	*softdepactivep = 0;
528	*softdepactiveaccp = 0;
529}
530
531void
532softdep_buf_append(struct buf *bp, struct workhead *wkhd)
533{
534
535	panic("softdep_buf_appendwork called");
536}
537
538void
539softdep_inode_append(struct inode *ip,
540	struct ucred *cred,
541	struct workhead *wkhd)
542{
543
544	panic("softdep_inode_appendwork called");
545}
546
547void
548softdep_freework(struct workhead *wkhd)
549{
550
551	panic("softdep_freework called");
552}
553
554int
555softdep_prerename(struct vnode *fdvp,
556	struct vnode *fvp,
557	struct vnode *tdvp,
558	struct vnode *tvp)
559{
560
561	panic("softdep_prerename called");
562}
563
564int
565softdep_prelink(struct vnode *dvp,
566	struct vnode *vp,
567	struct componentname *cnp)
568{
569
570	panic("softdep_prelink called");
571}
572
573#else
574
575FEATURE(softupdates, "FFS soft-updates support");
576
577static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
578    "soft updates stats");
579static SYSCTL_NODE(_debug_softdep, OID_AUTO, total,
580    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
581    "total dependencies allocated");
582static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse,
583    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
584    "high use dependencies allocated");
585static SYSCTL_NODE(_debug_softdep, OID_AUTO, current,
586    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
587    "current dependencies allocated");
588static SYSCTL_NODE(_debug_softdep, OID_AUTO, write,
589    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
590    "current dependencies written");
591
592unsigned long dep_current[D_LAST + 1];
593unsigned long dep_highuse[D_LAST + 1];
594unsigned long dep_total[D_LAST + 1];
595unsigned long dep_write[D_LAST + 1];
596
597#define	SOFTDEP_TYPE(type, str, long)					\
598    static MALLOC_DEFINE(M_ ## type, #str, long);			\
599    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
600	&dep_total[D_ ## type], 0, "");					\
601    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, 	\
602	&dep_current[D_ ## type], 0, "");				\
603    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, 	\
604	&dep_highuse[D_ ## type], 0, "");				\
605    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, 	\
606	&dep_write[D_ ## type], 0, "");
607
608SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
609SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
610SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
611    "Block or frag allocated from cyl group map");
612SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
613SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
614SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
615SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
616SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
617SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
618SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
619SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
620SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
621SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
622SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
623SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
624SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
625SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
626SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
627SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
628SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
629SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
630SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
631SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
632SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
633SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
634SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
635SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");
636
637static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");
638
639static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
640static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
641static MALLOC_DEFINE(M_MOUNTDATA, "softdep", "Softdep per-mount data");
642
643#define M_SOFTDEP_FLAGS	(M_WAITOK)
644
645/*
646 * translate from workitem type to memory type
647 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
648 */
649static struct malloc_type *memtype[] = {
650	NULL,
651	M_PAGEDEP,
652	M_INODEDEP,
653	M_BMSAFEMAP,
654	M_NEWBLK,
655	M_ALLOCDIRECT,
656	M_INDIRDEP,
657	M_ALLOCINDIR,
658	M_FREEFRAG,
659	M_FREEBLKS,
660	M_FREEFILE,
661	M_DIRADD,
662	M_MKDIR,
663	M_DIRREM,
664	M_NEWDIRBLK,
665	M_FREEWORK,
666	M_FREEDEP,
667	M_JADDREF,
668	M_JREMREF,
669	M_JMVREF,
670	M_JNEWBLK,
671	M_JFREEBLK,
672	M_JFREEFRAG,
673	M_JSEG,
674	M_JSEGDEP,
675	M_SBDEP,
676	M_JTRUNC,
677	M_JFSYNC,
678	M_SENTINEL
679};
680
681#define DtoM(type) (memtype[type])
682
683/*
684 * Names of malloc types.
685 */
686#define TYPENAME(type)  \
687	((unsigned)(type) <= D_LAST && (unsigned)(type) >= D_FIRST ? \
688	memtype[type]->ks_shortdesc : "???")
689/*
690 * End system adaptation definitions.
691 */
692
693#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
694#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)
695
696/*
697 * Internal function prototypes.
698 */
699static	void check_clear_deps(struct mount *);
700static	void softdep_error(char *, int);
701static	int softdep_prerename_vnode(struct ufsmount *, struct vnode *);
702static	int softdep_process_worklist(struct mount *, int);
703static	int softdep_waitidle(struct mount *, int);
704static	void drain_output(struct vnode *);
705static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
706static	int check_inodedep_free(struct inodedep *);
707static	void clear_remove(struct mount *);
708static	void clear_inodedeps(struct mount *);
709static	void unlinked_inodedep(struct mount *, struct inodedep *);
710static	void clear_unlinked_inodedep(struct inodedep *);
711static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
712static	int flush_pagedep_deps(struct vnode *, struct mount *,
713	    struct diraddhd *, struct buf *);
714static	int free_pagedep(struct pagedep *);
715static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
716static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
717static	int flush_deplist(struct allocdirectlst *, int, int *);
718static	int sync_cgs(struct mount *, int);
719static	int handle_written_filepage(struct pagedep *, struct buf *, int);
720static	int handle_written_sbdep(struct sbdep *, struct buf *);
721static	void initiate_write_sbdep(struct sbdep *);
722static	void diradd_inode_written(struct diradd *, struct inodedep *);
723static	int handle_written_indirdep(struct indirdep *, struct buf *,
724	    struct buf**, int);
725static	int handle_written_inodeblock(struct inodedep *, struct buf *, int);
726static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
727	    uint8_t *);
728static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *, int);
729static	void handle_written_jaddref(struct jaddref *);
730static	void handle_written_jremref(struct jremref *);
731static	void handle_written_jseg(struct jseg *, struct buf *);
732static	void handle_written_jnewblk(struct jnewblk *);
733static	void handle_written_jblkdep(struct jblkdep *);
734static	void handle_written_jfreefrag(struct jfreefrag *);
735static	void complete_jseg(struct jseg *);
736static	void complete_jsegs(struct jseg *);
737static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
738static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
739static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
740static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
741static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
742static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
743static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
744static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
745static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
746static	inline void inoref_write(struct inoref *, struct jseg *,
747	    struct jrefrec *);
748static	void handle_allocdirect_partdone(struct allocdirect *,
749	    struct workhead *);
750static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
751	    struct workhead *);
752static	void indirdep_complete(struct indirdep *);
753static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
754static	void indirblk_insert(struct freework *);
755static	void indirblk_remove(struct freework *);
756static	void handle_allocindir_partdone(struct allocindir *);
757static	void initiate_write_filepage(struct pagedep *, struct buf *);
758static	void initiate_write_indirdep(struct indirdep*, struct buf *);
759static	void handle_written_mkdir(struct mkdir *, int);
760static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
761	    uint8_t *);
762static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
763static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
764static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
765static	void handle_workitem_freefile(struct freefile *);
766static	int handle_workitem_remove(struct dirrem *, int);
767static	struct dirrem *newdirrem(struct buf *, struct inode *,
768	    struct inode *, int, struct dirrem **);
769static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
770	    struct buf *);
771static	void cancel_indirdep(struct indirdep *, struct buf *,
772	    struct freeblks *);
773static	void free_indirdep(struct indirdep *);
774static	void free_diradd(struct diradd *, struct workhead *);
775static	void merge_diradd(struct inodedep *, struct diradd *);
776static	void complete_diradd(struct diradd *);
777static	struct diradd *diradd_lookup(struct pagedep *, int);
778static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
779	    struct jremref *);
780static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
781	    struct jremref *);
782static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
783	    struct jremref *, struct jremref *);
784static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
785	    struct jremref *);
786static	void cancel_allocindir(struct allocindir *, struct buf *bp,
787	    struct freeblks *, int);
788static	int setup_trunc_indir(struct freeblks *, struct inode *,
789	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
790static	void complete_trunc_indir(struct freework *);
791static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
792	    int);
793static	void complete_mkdir(struct mkdir *);
794static	void free_newdirblk(struct newdirblk *);
795static	void free_jremref(struct jremref *);
796static	void free_jaddref(struct jaddref *);
797static	void free_jsegdep(struct jsegdep *);
798static	void free_jsegs(struct jblocks *);
799static	void rele_jseg(struct jseg *);
800static	void free_jseg(struct jseg *, struct jblocks *);
801static	void free_jnewblk(struct jnewblk *);
802static	void free_jblkdep(struct jblkdep *);
803static	void free_jfreefrag(struct jfreefrag *);
804static	void free_freedep(struct freedep *);
805static	void journal_jremref(struct dirrem *, struct jremref *,
806	    struct inodedep *);
807static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
808static	int cancel_jaddref(struct jaddref *, struct inodedep *,
809	    struct workhead *);
810static	void cancel_jfreefrag(struct jfreefrag *);
811static	inline void setup_freedirect(struct freeblks *, struct inode *,
812	    int, int);
813static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
814static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
815	    ufs_lbn_t, int);
816static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
817static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
818static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
819static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
820static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
821static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
822	    int, int);
823static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
824static 	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
825static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
826static	void newblk_freefrag(struct newblk*);
827static	void free_newblk(struct newblk *);
828static	void cancel_allocdirect(struct allocdirectlst *,
829	    struct allocdirect *, struct freeblks *);
830static	int check_inode_unwritten(struct inodedep *);
831static	int free_inodedep(struct inodedep *);
832static	void freework_freeblock(struct freework *, uint64_t);
833static	void freework_enqueue(struct freework *);
834static	int handle_workitem_freeblocks(struct freeblks *, int);
835static	int handle_complete_freeblocks(struct freeblks *, int);
836static	void handle_workitem_indirblk(struct freework *);
837static	void handle_written_freework(struct freework *);
838static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
839static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
840	    struct workhead *);
841static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
842	    struct inodedep *, struct allocindir *, ufs_lbn_t);
843static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
844	    ufs2_daddr_t, ufs_lbn_t);
845static	void handle_workitem_freefrag(struct freefrag *);
846static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
847	    ufs_lbn_t, uint64_t);
848static	void allocdirect_merge(struct allocdirectlst *,
849	    struct allocdirect *, struct allocdirect *);
850static	struct freefrag *allocindir_merge(struct allocindir *,
851	    struct allocindir *);
852static	int bmsafemap_find(struct bmsafemap_hashhead *, int,
853	    struct bmsafemap **);
854static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
855	    int cg, struct bmsafemap *);
856static	int newblk_find(struct newblk_hashhead *, ufs2_daddr_t, int,
857	    struct newblk **);
858static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
859static	int inodedep_find(struct inodedep_hashhead *, ino_t,
860	    struct inodedep **);
861static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
862static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
863	    int, struct pagedep **);
864static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
865	    struct pagedep **);
866static	void pause_timer(void *);
867static	int request_cleanup(struct mount *, int);
868static	int softdep_request_cleanup_flush(struct mount *, struct ufsmount *);
869static	void schedule_cleanup(struct mount *);
870static void softdep_ast_cleanup_proc(struct thread *, int);
871static struct ufsmount *softdep_bp_to_mp(struct buf *bp);
872static	int process_worklist_item(struct mount *, int, int);
873static	void process_removes(struct vnode *);
874static	void process_truncates(struct vnode *);
875static	void jwork_move(struct workhead *, struct workhead *);
876static	void jwork_insert(struct workhead *, struct jsegdep *);
877static	void add_to_worklist(struct worklist *, int);
878static	void wake_worklist(struct worklist *);
879static	void wait_worklist(struct worklist *, char *);
880static	void remove_from_worklist(struct worklist *);
881static	void softdep_flush(void *);
882static	void softdep_flushjournal(struct mount *);
883static	int softdep_speedup(struct ufsmount *);
884static	void worklist_speedup(struct mount *);
885static	int journal_mount(struct mount *, struct fs *, struct ucred *);
886static	void journal_unmount(struct ufsmount *);
887static	int journal_space(struct ufsmount *, int);
888static	void journal_suspend(struct ufsmount *);
889static	int journal_unsuspend(struct ufsmount *ump);
890static	void add_to_journal(struct worklist *);
891static	void remove_from_journal(struct worklist *);
892static	bool softdep_excess_items(struct ufsmount *, int);
893static	void softdep_process_journal(struct mount *, struct worklist *, int);
894static	struct jremref *newjremref(struct dirrem *, struct inode *,
895	    struct inode *ip, off_t, nlink_t);
896static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
897	    uint16_t);
898static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
899	    uint16_t);
900static	inline struct jsegdep *inoref_jseg(struct inoref *);
901static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
902static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
903	    ufs2_daddr_t, int);
904static	void adjust_newfreework(struct freeblks *, int);
905static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
906static	void move_newblock_dep(struct jaddref *, struct inodedep *);
907static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
908static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
909	    ufs2_daddr_t, long, ufs_lbn_t);
910static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
911	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
912static	int jwait(struct worklist *, int);
913static	struct inodedep *inodedep_lookup_ip(struct inode *);
914static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
915static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
916static	void handle_jwork(struct workhead *);
917static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
918	    struct mkdir **);
919static	struct jblocks *jblocks_create(void);
920static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
921static	void jblocks_free(struct jblocks *, struct mount *, int);
922static	void jblocks_destroy(struct jblocks *);
923static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);
924
925/*
926 * Exported softdep operations.
927 */
928static	void softdep_disk_io_initiation(struct buf *);
929static	void softdep_disk_write_complete(struct buf *);
930static	void softdep_deallocate_dependencies(struct buf *);
931static	int softdep_count_dependencies(struct buf *bp, int);
932
933/*
934 * Global lock over all of soft updates.
935 */
936static struct mtx lk;
937MTX_SYSINIT(softdep_lock, &lk, "global softdep", MTX_DEF);
938
939#define ACQUIRE_GBLLOCK(lk)	mtx_lock(lk)
940#define FREE_GBLLOCK(lk)	mtx_unlock(lk)
941#define GBLLOCK_OWNED(lk)	mtx_assert((lk), MA_OWNED)
942
943/*
944 * Per-filesystem soft-updates locking.
945 */
946#define LOCK_PTR(ump)		(&(ump)->um_softdep->sd_fslock)
947#define TRY_ACQUIRE_LOCK(ump)	rw_try_wlock(&(ump)->um_softdep->sd_fslock)
948#define ACQUIRE_LOCK(ump)	rw_wlock(&(ump)->um_softdep->sd_fslock)
949#define FREE_LOCK(ump)		rw_wunlock(&(ump)->um_softdep->sd_fslock)
950#define LOCK_OWNED(ump)		rw_assert(&(ump)->um_softdep->sd_fslock, \
951				    RA_WLOCKED)
952
953#define	BUF_AREC(bp)		lockallowrecurse(&(bp)->b_lock)
954#define	BUF_NOREC(bp)		lockdisablerecurse(&(bp)->b_lock)
955
956/*
957 * Worklist queue management.
958 * These routines require that the lock be held.
959 */
960#ifndef /* NOT */ INVARIANTS
961#define WORKLIST_INSERT(head, item) do {	\
962	(item)->wk_state |= ONWORKLIST;		\
963	LIST_INSERT_HEAD(head, item, wk_list);	\
964} while (0)
965#define WORKLIST_REMOVE(item) do {		\
966	(item)->wk_state &= ~ONWORKLIST;	\
967	LIST_REMOVE(item, wk_list);		\
968} while (0)
969#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
970#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE
971
972#else /* INVARIANTS */
973static	void worklist_insert(struct workhead *, struct worklist *, int,
974	const char *, int);
975static	void worklist_remove(struct worklist *, int, const char *, int);
976
977#define WORKLIST_INSERT(head, item) \
978	worklist_insert(head, item, 1, __func__, __LINE__)
979#define WORKLIST_INSERT_UNLOCKED(head, item)\
980	worklist_insert(head, item, 0, __func__, __LINE__)
981#define WORKLIST_REMOVE(item)\
982	worklist_remove(item, 1, __func__, __LINE__)
983#define WORKLIST_REMOVE_UNLOCKED(item)\
984	worklist_remove(item, 0, __func__, __LINE__)
985
986static void
987worklist_insert(struct workhead *head,
988	struct worklist *item,
989	int locked,
990	const char *func,
991	int line)
992{
993
994	if (locked)
995		LOCK_OWNED(VFSTOUFS(item->wk_mp));
996	if (item->wk_state & ONWORKLIST)
997		panic("worklist_insert: %p %s(0x%X) already on list, "
998		    "added in function %s at line %d",
999		    item, TYPENAME(item->wk_type), item->wk_state,
1000		    item->wk_func, item->wk_line);
1001	item->wk_state |= ONWORKLIST;
1002	item->wk_func = func;
1003	item->wk_line = line;
1004	LIST_INSERT_HEAD(head, item, wk_list);
1005}
1006
1007static void
1008worklist_remove(struct worklist *item,
1009	int locked,
1010	const char *func,
1011	int line)
1012{
1013
1014	if (locked)
1015		LOCK_OWNED(VFSTOUFS(item->wk_mp));
1016	if ((item->wk_state & ONWORKLIST) == 0)
1017		panic("worklist_remove: %p %s(0x%X) not on list, "
1018		    "removed in function %s at line %d",
1019		    item, TYPENAME(item->wk_type), item->wk_state,
1020		    item->wk_func, item->wk_line);
1021	item->wk_state &= ~ONWORKLIST;
1022	item->wk_func = func;
1023	item->wk_line = line;
1024	LIST_REMOVE(item, wk_list);
1025}
1026#endif /* INVARIANTS */
1027
1028/*
1029 * Merge two jsegdeps keeping only the oldest one as newer references
1030 * can't be discarded until after older references.
1031 */
1032static inline struct jsegdep *
1033jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
1034{
1035	struct jsegdep *swp;
1036
1037	if (two == NULL)
1038		return (one);
1039
1040	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
1041		swp = one;
1042		one = two;
1043		two = swp;
1044	}
1045	WORKLIST_REMOVE(&two->jd_list);
1046	free_jsegdep(two);
1047
1048	return (one);
1049}
1050
1051/*
1052 * If two freedeps are compatible free one to reduce list size.
1053 */
1054static inline struct freedep *
1055freedep_merge(struct freedep *one, struct freedep *two)
1056{
1057	if (two == NULL)
1058		return (one);
1059
1060	if (one->fd_freework == two->fd_freework) {
1061		WORKLIST_REMOVE(&two->fd_list);
1062		free_freedep(two);
1063	}
1064	return (one);
1065}
1066
1067/*
1068 * Move journal work from one list to another.  Duplicate freedeps and
1069 * jsegdeps are coalesced to keep the lists as small as possible.
1070 */
1071static void
1072jwork_move(struct workhead *dst, struct workhead *src)
1073{
1074	struct freedep *freedep;
1075	struct jsegdep *jsegdep;
1076	struct worklist *wkn;
1077	struct worklist *wk;
1078
1079	KASSERT(dst != src,
1080	    ("jwork_move: dst == src"));
1081	freedep = NULL;
1082	jsegdep = NULL;
1083	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
1084		if (wk->wk_type == D_JSEGDEP)
1085			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1086		else if (wk->wk_type == D_FREEDEP)
1087			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1088	}
1089
1090	while ((wk = LIST_FIRST(src)) != NULL) {
1091		WORKLIST_REMOVE(wk);
1092		WORKLIST_INSERT(dst, wk);
1093		if (wk->wk_type == D_JSEGDEP) {
1094			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1095			continue;
1096		}
1097		if (wk->wk_type == D_FREEDEP)
1098			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1099	}
1100}
1101
1102static void
1103jwork_insert(struct workhead *dst, struct jsegdep *jsegdep)
1104{
1105	struct jsegdep *jsegdepn;
1106	struct worklist *wk;
1107
1108	LIST_FOREACH(wk, dst, wk_list)
1109		if (wk->wk_type == D_JSEGDEP)
1110			break;
1111	if (wk == NULL) {
1112		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1113		return;
1114	}
1115	jsegdepn = WK_JSEGDEP(wk);
1116	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
1117		WORKLIST_REMOVE(wk);
1118		free_jsegdep(jsegdepn);
1119		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1120	} else
1121		free_jsegdep(jsegdep);
1122}
1123
1124/*
1125 * Routines for tracking and managing workitems.
1126 */
1127static	void workitem_free(struct worklist *, int);
1128static	void workitem_alloc(struct worklist *, int, struct mount *);
1129static	void workitem_reassign(struct worklist *, int);
1130
1131#define	WORKITEM_FREE(item, type) \
1132	workitem_free((struct worklist *)(item), (type))
1133#define	WORKITEM_REASSIGN(item, type) \
1134	workitem_reassign((struct worklist *)(item), (type))
1135
1136static void
1137workitem_free(struct worklist *item, int type)
1138{
1139	struct ufsmount *ump;
1140
1141#ifdef INVARIANTS
1142	if (item->wk_state & ONWORKLIST)
1143		panic("workitem_free: %s(0x%X) still on list, "
1144		    "added in function %s at line %d",
1145		    TYPENAME(item->wk_type), item->wk_state,
1146		    item->wk_func, item->wk_line);
1147	if (item->wk_type != type && type != D_NEWBLK)
1148		panic("workitem_free: type mismatch %s != %s",
1149		    TYPENAME(item->wk_type), TYPENAME(type));
1150#endif
1151	if (item->wk_state & IOWAITING)
1152		wakeup(item);
1153	ump = VFSTOUFS(item->wk_mp);
1154	LOCK_OWNED(ump);
1155	KASSERT(ump->softdep_deps > 0,
1156	    ("workitem_free: %s: softdep_deps going negative",
1157	    ump->um_fs->fs_fsmnt));
1158	if (--ump->softdep_deps == 0 && ump->softdep_req)
1159		wakeup(&ump->softdep_deps);
1160	KASSERT(dep_current[item->wk_type] > 0,
1161	    ("workitem_free: %s: dep_current[%s] going negative",
1162	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1163	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1164	    ("workitem_free: %s: softdep_curdeps[%s] going negative",
1165	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1166	atomic_subtract_long(&dep_current[item->wk_type], 1);
1167	ump->softdep_curdeps[item->wk_type] -= 1;
1168	LIST_REMOVE(item, wk_all);
1169	free(item, DtoM(type));
1170}
1171
1172static void
1173workitem_alloc(struct worklist *item,
1174	int type,
1175	struct mount *mp)
1176{
1177	struct ufsmount *ump;
1178
1179	item->wk_type = type;
1180	item->wk_mp = mp;
1181	item->wk_state = 0;
1182
1183	ump = VFSTOUFS(mp);
1184	ACQUIRE_GBLLOCK(&lk);
1185	dep_current[type]++;
1186	if (dep_current[type] > dep_highuse[type])
1187		dep_highuse[type] = dep_current[type];
1188	dep_total[type]++;
1189	FREE_GBLLOCK(&lk);
1190	ACQUIRE_LOCK(ump);
1191	ump->softdep_curdeps[type] += 1;
1192	ump->softdep_deps++;
1193	ump->softdep_accdeps++;
1194	LIST_INSERT_HEAD(&ump->softdep_alldeps[type], item, wk_all);
1195	FREE_LOCK(ump);
1196}
1197
1198static void
1199workitem_reassign(struct worklist *item, int newtype)
1200{
1201	struct ufsmount *ump;
1202
1203	ump = VFSTOUFS(item->wk_mp);
1204	LOCK_OWNED(ump);
1205	KASSERT(ump->softdep_curdeps[item->wk_type] > 0,
1206	    ("workitem_reassign: %s: softdep_curdeps[%s] going negative",
1207	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1208	ump->softdep_curdeps[item->wk_type] -= 1;
1209	ump->softdep_curdeps[newtype] += 1;
1210	KASSERT(dep_current[item->wk_type] > 0,
1211	    ("workitem_reassign: %s: dep_current[%s] going negative",
1212	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1213	ACQUIRE_GBLLOCK(&lk);
1214	dep_current[newtype]++;
1215	dep_current[item->wk_type]--;
1216	if (dep_current[newtype] > dep_highuse[newtype])
1217		dep_highuse[newtype] = dep_current[newtype];
1218	dep_total[newtype]++;
1219	FREE_GBLLOCK(&lk);
1220	item->wk_type = newtype;
1221	LIST_REMOVE(item, wk_all);
1222	LIST_INSERT_HEAD(&ump->softdep_alldeps[newtype], item, wk_all);
1223}
1224
1225/*
1226 * Workitem queue management
1227 */
1228static int max_softdeps;	/* maximum number of structs before slowdown */
1229static int tickdelay = 2;	/* number of ticks to pause during slowdown */
1230static int proc_waiting;	/* tracks whether we have a timeout posted */
1231static int *stat_countp;	/* statistic to count in proc_waiting timeout */
1232static struct callout softdep_callout;
1233static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
1234static int req_clear_remove;	/* syncer process flush some freeblks */
1235static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
1236
1237/*
1238 * runtime statistics
1239 */
1240static int stat_flush_threads;	/* number of softdep flushing threads */
1241static int stat_worklist_push;	/* number of worklist cleanups */
1242static int stat_delayed_inact;	/* number of delayed inactivation cleanups */
1243static int stat_blk_limit_push;	/* number of times block limit neared */
1244static int stat_ino_limit_push;	/* number of times inode limit neared */
1245static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
1246static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
1247static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
1248static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
1249static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
1250static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
1251static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
1252static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
1253static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
1254static int stat_journal_min;	/* Times hit journal min threshold */
1255static int stat_journal_low;	/* Times hit journal low threshold */
1256static int stat_journal_wait;	/* Times blocked in jwait(). */
1257static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
1258static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
1259static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
1260static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
1261static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
1262static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
1263static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
1264static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
1265static int stat_cleanup_failures; /* Number of cleanup requests that failed */
1266static int stat_emptyjblocks; /* Number of potentially empty journal blocks */
1267
1268SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
1269    &max_softdeps, 0, "");
1270SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
1271    &tickdelay, 0, "");
1272SYSCTL_INT(_debug_softdep, OID_AUTO, flush_threads, CTLFLAG_RD,
1273    &stat_flush_threads, 0, "");
1274SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push,
1275    CTLFLAG_RW | CTLFLAG_STATS, &stat_worklist_push, 0,"");
1276SYSCTL_INT(_debug_softdep, OID_AUTO, delayed_inactivations, CTLFLAG_RD,
1277    &stat_delayed_inact, 0, "");
1278SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push,
1279    CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_push, 0,"");
1280SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push,
1281    CTLFLAG_RW | CTLFLAG_STATS, &stat_ino_limit_push, 0,"");
1282SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit,
1283    CTLFLAG_RW | CTLFLAG_STATS, &stat_blk_limit_hit, 0, "");
1284SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit,
1285    CTLFLAG_RW | CTLFLAG_STATS, &stat_ino_limit_hit, 0, "");
1286SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit,
1287    CTLFLAG_RW | CTLFLAG_STATS, &stat_sync_limit_hit, 0, "");
1288SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs,
1289    CTLFLAG_RW | CTLFLAG_STATS, &stat_indir_blk_ptrs, 0, "");
1290SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap,
1291    CTLFLAG_RW | CTLFLAG_STATS, &stat_inode_bitmap, 0, "");
1292SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs,
1293    CTLFLAG_RW | CTLFLAG_STATS, &stat_direct_blk_ptrs, 0, "");
1294SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry,
1295    CTLFLAG_RW | CTLFLAG_STATS, &stat_dir_entry, 0, "");
1296SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback,
1297    CTLFLAG_RW | CTLFLAG_STATS, &stat_jaddref, 0, "");
1298SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback,
1299    CTLFLAG_RW | CTLFLAG_STATS, &stat_jnewblk, 0, "");
1300SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low,
1301    CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_low, 0, "");
1302SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min,
1303    CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_min, 0, "");
1304SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait,
1305    CTLFLAG_RW | CTLFLAG_STATS, &stat_journal_wait, 0, "");
1306SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage,
1307    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_filepage, 0, "");
1308SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks,
1309    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_freeblks, 0, "");
1310SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode,
1311    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_inode, 0, "");
1312SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk,
1313    CTLFLAG_RW | CTLFLAG_STATS, &stat_jwait_newblk, 0, "");
1314SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests,
1315    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_blkrequests, 0, "");
1316SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests,
1317    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_inorequests, 0, "");
1318SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay,
1319    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_high_delay, 0, "");
1320SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries,
1321    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_retries, 0, "");
1322SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures,
1323    CTLFLAG_RW | CTLFLAG_STATS, &stat_cleanup_failures, 0, "");
1324
1325SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
1326    &softdep_flushcache, 0, "");
1327SYSCTL_INT(_debug_softdep, OID_AUTO, emptyjblocks, CTLFLAG_RD,
1328    &stat_emptyjblocks, 0, "");
1329
1330SYSCTL_DECL(_vfs_ffs);
1331
1332/* Whether to recompute the summary at mount time */
1333static int compute_summary_at_mount = 0;
1334SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
1335	   &compute_summary_at_mount, 0, "Recompute summary at mount");
1336static int print_threads = 0;
1337SYSCTL_INT(_debug_softdep, OID_AUTO, print_threads, CTLFLAG_RW,
1338    &print_threads, 0, "Notify flusher thread start/stop");
1339
1340/* List of all filesystems mounted with soft updates */
1341static TAILQ_HEAD(, mount_softdeps) softdepmounts;
1342
1343static void
1344get_parent_vp_unlock_bp(struct mount *mp,
1345	struct buf *bp,
1346	struct diraddhd *diraddhdp,
1347	struct diraddhd *unfinishedp)
1348{
1349	struct diradd *dap;
1350
1351	/*
1352	 * Requeue unfinished dependencies before
1353	 * unlocking buffer, which could make
1354	 * diraddhdp invalid.
1355	 */
1356	ACQUIRE_LOCK(VFSTOUFS(mp));
1357	while ((dap = LIST_FIRST(unfinishedp)) != NULL) {
1358		LIST_REMOVE(dap, da_pdlist);
1359		LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
1360	}
1361	FREE_LOCK(VFSTOUFS(mp));
1362
1363	bp->b_vflags &= ~BV_SCANNED;
1364	BUF_NOREC(bp);
1365	BUF_UNLOCK(bp);
1366}
1367
1368/*
1369 * This function fetches inode inum on mount point mp.  We already
1370 * hold a locked vnode vp, and might have a locked buffer bp belonging
1371 * to vp.
1372
1373 * We must not block on acquiring the new inode lock as we will get
1374 * into a lock-order reversal with the buffer lock and possibly get a
1375 * deadlock.  Thus if we cannot instantiate the requested vnode
1376 * without sleeping on its lock, we must unlock the vnode and the
1377 * buffer before doing a blocking on the vnode lock.  We return
1378 * ERELOOKUP if we have had to unlock either the vnode or the buffer so
1379 * that the caller can reassess its state.
1380 *
1381 * Top-level VFS code (for syscalls and other consumers, e.g. callers
1382 * of VOP_FSYNC() in syncer) check for ERELOOKUP and restart at safe
1383 * point.
1384 *
1385 * Since callers expect to operate on fully constructed vnode, we also
1386 * recheck v_data after relock, and return ENOENT if NULL.
1387 *
1388 * If unlocking bp, we must unroll dequeueing its unfinished
1389 * dependencies, and clear scan flag, before unlocking.  If unlocking
1390 * vp while it is under deactivation, we re-queue deactivation.
1391 */
1392static int
1393get_parent_vp(struct vnode *vp,
1394	struct mount *mp,
1395	ino_t inum,
1396	struct buf *bp,
1397	struct diraddhd *diraddhdp,
1398	struct diraddhd *unfinishedp,
1399	struct vnode **rvp)
1400{
1401	struct vnode *pvp;
1402	int error;
1403	bool bplocked;
1404
1405	ASSERT_VOP_ELOCKED(vp, "child vnode must be locked");
1406	for (bplocked = true, pvp = NULL;;) {
1407		error = ffs_vgetf(mp, inum, LK_EXCLUSIVE | LK_NOWAIT, &pvp,
1408		    FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP);
1409		if (error == 0) {
1410			/*
1411			 * Since we could have unlocked vp, the inode
1412			 * number could no longer indicate a
1413			 * constructed node.  In this case, we must
1414			 * restart the syscall.
1415			 */
1416			if (VTOI(pvp)->i_mode == 0 || !bplocked) {
1417				if (bp != NULL && bplocked)
1418					get_parent_vp_unlock_bp(mp, bp,
1419					    diraddhdp, unfinishedp);
1420				if (VTOI(pvp)->i_mode == 0)
1421					vgone(pvp);
1422				error = ERELOOKUP;
1423				goto out2;
1424			}
1425			goto out1;
1426		}
1427		if (bp != NULL && bplocked) {
1428			get_parent_vp_unlock_bp(mp, bp, diraddhdp, unfinishedp);
1429			bplocked = false;
1430		}
1431
1432		/*
1433		 * Do not drop vnode lock while inactivating during
1434		 * vunref.  This would result in leaks of the VI flags
1435		 * and reclaiming of non-truncated vnode.  Instead,
1436		 * re-schedule inactivation hoping that we would be
1437		 * able to sync inode later.
1438		 */
1439		if ((vp->v_iflag & VI_DOINGINACT) != 0 &&
1440		    (vp->v_vflag & VV_UNREF) != 0) {
1441			VI_LOCK(vp);
1442			vp->v_iflag |= VI_OWEINACT;
1443			VI_UNLOCK(vp);
1444			return (ERELOOKUP);
1445		}
1446
1447		VOP_UNLOCK(vp);
1448		error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &pvp,
1449		    FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP);
1450		if (error != 0) {
1451			MPASS(error != ERELOOKUP);
1452			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1453			break;
1454		}
1455		if (VTOI(pvp)->i_mode == 0) {
1456			vgone(pvp);
1457			vput(pvp);
1458			pvp = NULL;
1459			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1460			error = ERELOOKUP;
1461			break;
1462		}
1463		error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
1464		if (error == 0)
1465			break;
1466		vput(pvp);
1467		pvp = NULL;
1468		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1469		if (vp->v_data == NULL) {
1470			error = ENOENT;
1471			break;
1472		}
1473	}
1474	if (bp != NULL) {
1475		MPASS(!bplocked);
1476		error = ERELOOKUP;
1477	}
1478out2:
1479	if (error != 0 && pvp != NULL) {
1480		vput(pvp);
1481		pvp = NULL;
1482	}
1483out1:
1484	*rvp = pvp;
1485	ASSERT_VOP_ELOCKED(vp, "child vnode must be locked on return");
1486	return (error);
1487}
1488
1489/*
1490 * This function cleans the worklist for a filesystem.
1491 * Each filesystem running with soft dependencies gets its own
1492 * thread to run in this function. The thread is started up in
1493 * softdep_mount and shutdown in softdep_unmount. They show up
1494 * as part of the kernel "bufdaemon" process whose process
1495 * entry is available in bufdaemonproc.
1496 */
1497static int searchfailed;
1498extern struct proc *bufdaemonproc;
1499static void
1500softdep_flush(void *addr)
1501{
1502	struct mount *mp;
1503	struct thread *td;
1504	struct ufsmount *ump;
1505	int cleanups;
1506
1507	td = curthread;
1508	td->td_pflags |= TDP_NORUNNINGBUF;
1509	mp = (struct mount *)addr;
1510	ump = VFSTOUFS(mp);
1511	atomic_add_int(&stat_flush_threads, 1);
1512	ACQUIRE_LOCK(ump);
1513	ump->softdep_flags &= ~FLUSH_STARTING;
1514	wakeup(&ump->softdep_flushtd);
1515	FREE_LOCK(ump);
1516	if (print_threads) {
1517		if (stat_flush_threads == 1)
1518			printf("Running %s at pid %d\n", bufdaemonproc->p_comm,
1519			    bufdaemonproc->p_pid);
1520		printf("Start thread %s\n", td->td_name);
1521	}
1522	for (;;) {
1523		while (softdep_process_worklist(mp, 0) > 0 ||
1524		    (MOUNTEDSUJ(mp) &&
1525		    VFSTOUFS(mp)->softdep_jblocks->jb_suspended))
1526			kthread_suspend_check();
1527		ACQUIRE_LOCK(ump);
1528		if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1529			msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM,
1530			    "sdflush", hz / 2);
1531		ump->softdep_flags &= ~FLUSH_CLEANUP;
1532		/*
1533		 * Check to see if we are done and need to exit.
1534		 */
1535		if ((ump->softdep_flags & FLUSH_EXIT) == 0) {
1536			FREE_LOCK(ump);
1537			continue;
1538		}
1539		ump->softdep_flags &= ~FLUSH_EXIT;
1540		cleanups = ump->um_softdep->sd_cleanups;
1541		FREE_LOCK(ump);
1542		wakeup(&ump->softdep_flags);
1543		if (print_threads) {
1544			printf("Stop thread %s: searchfailed %d, "
1545			    "did cleanups %d\n",
1546			    td->td_name, searchfailed, cleanups);
1547		}
1548		atomic_subtract_int(&stat_flush_threads, 1);
1549		kthread_exit();
1550		panic("kthread_exit failed\n");
1551	}
1552}
1553
1554static void
1555worklist_speedup(struct mount *mp)
1556{
1557	struct ufsmount *ump;
1558
1559	ump = VFSTOUFS(mp);
1560	LOCK_OWNED(ump);
1561	if ((ump->softdep_flags & (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1562		ump->softdep_flags |= FLUSH_CLEANUP;
1563	wakeup(&ump->softdep_flushtd);
1564}
1565
1566static void
1567softdep_send_speedup(struct ufsmount *ump,
1568	off_t shortage,
1569	uint64_t flags)
1570{
1571	struct buf *bp;
1572
1573	if ((ump->um_flags & UM_CANSPEEDUP) == 0)
1574		return;
1575
1576	bp = malloc(sizeof(*bp), M_TRIM, M_WAITOK | M_ZERO);
1577	bp->b_iocmd = BIO_SPEEDUP;
1578	bp->b_ioflags = flags;
1579	bp->b_bcount = omin(shortage, LONG_MAX);
1580	g_vfs_strategy(ump->um_bo, bp);
1581	bufwait(bp);
1582	free(bp, M_TRIM);
1583}
1584
1585static int
1586softdep_speedup(struct ufsmount *ump)
1587{
1588	struct ufsmount *altump;
1589	struct mount_softdeps *sdp;
1590
1591	LOCK_OWNED(ump);
1592	worklist_speedup(ump->um_mountp);
1593	bd_speedup();
1594	/*
1595	 * If we have global shortages, then we need other
1596	 * filesystems to help with the cleanup. Here we wakeup a
1597	 * flusher thread for a filesystem that is over its fair
1598	 * share of resources.
1599	 */
1600	if (req_clear_inodedeps || req_clear_remove) {
1601		ACQUIRE_GBLLOCK(&lk);
1602		TAILQ_FOREACH(sdp, &softdepmounts, sd_next) {
1603			if ((altump = sdp->sd_ump) == ump)
1604				continue;
1605			if (((req_clear_inodedeps &&
1606			    altump->softdep_curdeps[D_INODEDEP] >
1607			    max_softdeps / stat_flush_threads) ||
1608			    (req_clear_remove &&
1609			    altump->softdep_curdeps[D_DIRREM] >
1610			    (max_softdeps / 2) / stat_flush_threads)) &&
1611			    TRY_ACQUIRE_LOCK(altump))
1612				break;
1613		}
1614		if (sdp == NULL) {
1615			searchfailed++;
1616			FREE_GBLLOCK(&lk);
1617		} else {
1618			/*
1619			 * Move to the end of the list so we pick a
1620			 * different one on out next try.
1621			 */
1622			TAILQ_REMOVE(&softdepmounts, sdp, sd_next);
1623			TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
1624			FREE_GBLLOCK(&lk);
1625			if ((altump->softdep_flags &
1626			    (FLUSH_CLEANUP | FLUSH_EXIT)) == 0)
1627				altump->softdep_flags |= FLUSH_CLEANUP;
1628			altump->um_softdep->sd_cleanups++;
1629			wakeup(&altump->softdep_flushtd);
1630			FREE_LOCK(altump);
1631		}
1632	}
1633	return (speedup_syncer());
1634}
1635
1636/*
1637 * Add an item to the end of the work queue.
1638 * This routine requires that the lock be held.
1639 * This is the only routine that adds items to the list.
1640 * The following routine is the only one that removes items
1641 * and does so in order from first to last.
1642 */
1643
1644#define	WK_HEAD		0x0001	/* Add to HEAD. */
1645#define	WK_NODELAY	0x0002	/* Process immediately. */
1646
1647static void
1648add_to_worklist(struct worklist *wk, int flags)
1649{
1650	struct ufsmount *ump;
1651
1652	ump = VFSTOUFS(wk->wk_mp);
1653	LOCK_OWNED(ump);
1654	if (wk->wk_state & ONWORKLIST)
1655		panic("add_to_worklist: %s(0x%X) already on list",
1656		    TYPENAME(wk->wk_type), wk->wk_state);
1657	wk->wk_state |= ONWORKLIST;
1658	if (ump->softdep_on_worklist == 0) {
1659		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1660		ump->softdep_worklist_tail = wk;
1661	} else if (flags & WK_HEAD) {
1662		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1663	} else {
1664		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
1665		ump->softdep_worklist_tail = wk;
1666	}
1667	ump->softdep_on_worklist += 1;
1668	if (flags & WK_NODELAY)
1669		worklist_speedup(wk->wk_mp);
1670}
1671
1672/*
1673 * Remove the item to be processed. If we are removing the last
1674 * item on the list, we need to recalculate the tail pointer.
1675 */
1676static void
1677remove_from_worklist(struct worklist *wk)
1678{
1679	struct ufsmount *ump;
1680
1681	ump = VFSTOUFS(wk->wk_mp);
1682	if (ump->softdep_worklist_tail == wk)
1683		ump->softdep_worklist_tail =
1684		    (struct worklist *)wk->wk_list.le_prev;
1685	WORKLIST_REMOVE(wk);
1686	ump->softdep_on_worklist -= 1;
1687}
1688
1689static void
1690wake_worklist(struct worklist *wk)
1691{
1692	if (wk->wk_state & IOWAITING) {
1693		wk->wk_state &= ~IOWAITING;
1694		wakeup(wk);
1695	}
1696}
1697
1698static void
1699wait_worklist(struct worklist *wk, char *wmesg)
1700{
1701	struct ufsmount *ump;
1702
1703	ump = VFSTOUFS(wk->wk_mp);
1704	wk->wk_state |= IOWAITING;
1705	msleep(wk, LOCK_PTR(ump), PVM, wmesg, 0);
1706}
1707
1708/*
1709 * Process that runs once per second to handle items in the background queue.
1710 *
1711 * Note that we ensure that everything is done in the order in which they
1712 * appear in the queue. The code below depends on this property to ensure
1713 * that blocks of a file are freed before the inode itself is freed. This
1714 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
1715 * until all the old ones have been purged from the dependency lists.
1716 */
1717static int
1718softdep_process_worklist(struct mount *mp, int full)
1719{
1720	int cnt, matchcnt;
1721	struct ufsmount *ump;
1722	long starttime;
1723
1724	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
1725	ump = VFSTOUFS(mp);
1726	if (ump->um_softdep == NULL)
1727		return (0);
1728	matchcnt = 0;
1729	ACQUIRE_LOCK(ump);
1730	starttime = time_second;
1731	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
1732	check_clear_deps(mp);
1733	while (ump->softdep_on_worklist > 0) {
1734		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
1735			break;
1736		else
1737			matchcnt += cnt;
1738		check_clear_deps(mp);
1739		/*
1740		 * We do not generally want to stop for buffer space, but if
1741		 * we are really being a buffer hog, we will stop and wait.
1742		 */
1743		if (should_yield()) {
1744			FREE_LOCK(ump);
1745			kern_yield(PRI_USER);
1746			bwillwrite();
1747			ACQUIRE_LOCK(ump);
1748		}
1749		/*
1750		 * Never allow processing to run for more than one
1751		 * second. This gives the syncer thread the opportunity
1752		 * to pause if appropriate.
1753		 */
1754		if (!full && starttime != time_second)
1755			break;
1756	}
1757	if (full == 0)
1758		journal_unsuspend(ump);
1759	FREE_LOCK(ump);
1760	return (matchcnt);
1761}
1762
1763/*
1764 * Process all removes associated with a vnode if we are running out of
1765 * journal space.  Any other process which attempts to flush these will
1766 * be unable as we have the vnodes locked.
1767 */
1768static void
1769process_removes(struct vnode *vp)
1770{
1771	struct inodedep *inodedep;
1772	struct dirrem *dirrem;
1773	struct ufsmount *ump;
1774	struct mount *mp;
1775	ino_t inum;
1776
1777	mp = vp->v_mount;
1778	ump = VFSTOUFS(mp);
1779	LOCK_OWNED(ump);
1780	inum = VTOI(vp)->i_number;
1781	for (;;) {
1782top:
1783		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1784			return;
1785		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
1786			/*
1787			 * If another thread is trying to lock this vnode
1788			 * it will fail but we must wait for it to do so
1789			 * before we can proceed.
1790			 */
1791			if (dirrem->dm_state & INPROGRESS) {
1792				wait_worklist(&dirrem->dm_list, "pwrwait");
1793				goto top;
1794			}
1795			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
1796			    (COMPLETE | ONWORKLIST))
1797				break;
1798		}
1799		if (dirrem == NULL)
1800			return;
1801		remove_from_worklist(&dirrem->dm_list);
1802		FREE_LOCK(ump);
1803		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1804			panic("process_removes: suspended filesystem");
1805		handle_workitem_remove(dirrem, 0);
1806		vn_finished_secondary_write(mp);
1807		ACQUIRE_LOCK(ump);
1808	}
1809}
1810
1811/*
1812 * Process all truncations associated with a vnode if we are running out
1813 * of journal space.  This is called when the vnode lock is already held
1814 * and no other process can clear the truncation.  This function returns
1815 * a value greater than zero if it did any work.
1816 */
1817static void
1818process_truncates(struct vnode *vp)
1819{
1820	struct inodedep *inodedep;
1821	struct freeblks *freeblks;
1822	struct ufsmount *ump;
1823	struct mount *mp;
1824	ino_t inum;
1825	int cgwait;
1826
1827	mp = vp->v_mount;
1828	ump = VFSTOUFS(mp);
1829	LOCK_OWNED(ump);
1830	inum = VTOI(vp)->i_number;
1831	for (;;) {
1832		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1833			return;
1834		cgwait = 0;
1835		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
1836			/* Journal entries not yet written.  */
1837			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
1838				jwait(&LIST_FIRST(
1839				    &freeblks->fb_jblkdephd)->jb_list,
1840				    MNT_WAIT);
1841				break;
1842			}
1843			/* Another thread is executing this item. */
1844			if (freeblks->fb_state & INPROGRESS) {
1845				wait_worklist(&freeblks->fb_list, "ptrwait");
1846				break;
1847			}
1848			/* Freeblks is waiting on a inode write. */
1849			if ((freeblks->fb_state & COMPLETE) == 0) {
1850				FREE_LOCK(ump);
1851				ffs_update(vp, 1);
1852				ACQUIRE_LOCK(ump);
1853				break;
1854			}
1855			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
1856			    (ALLCOMPLETE | ONWORKLIST)) {
1857				remove_from_worklist(&freeblks->fb_list);
1858				freeblks->fb_state |= INPROGRESS;
1859				FREE_LOCK(ump);
1860				if (vn_start_secondary_write(NULL, &mp,
1861				    V_NOWAIT))
1862					panic("process_truncates: "
1863					    "suspended filesystem");
1864				handle_workitem_freeblocks(freeblks, 0);
1865				vn_finished_secondary_write(mp);
1866				ACQUIRE_LOCK(ump);
1867				break;
1868			}
1869			if (freeblks->fb_cgwait)
1870				cgwait++;
1871		}
1872		if (cgwait) {
1873			FREE_LOCK(ump);
1874			sync_cgs(mp, MNT_WAIT);
1875			ffs_sync_snap(mp, MNT_WAIT);
1876			ACQUIRE_LOCK(ump);
1877			continue;
1878		}
1879		if (freeblks == NULL)
1880			break;
1881	}
1882	return;
1883}
1884
1885/*
1886 * Process one item on the worklist.
1887 */
1888static int
1889process_worklist_item(struct mount *mp,
1890	int target,
1891	int flags)
1892{
1893	struct worklist sentinel;
1894	struct worklist *wk;
1895	struct ufsmount *ump;
1896	int matchcnt;
1897	int error;
1898
1899	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
1900	/*
1901	 * If we are being called because of a process doing a
1902	 * copy-on-write, then it is not safe to write as we may
1903	 * recurse into the copy-on-write routine.
1904	 */
1905	if (curthread->td_pflags & TDP_COWINPROGRESS)
1906		return (-1);
1907	PHOLD(curproc);	/* Don't let the stack go away. */
1908	ump = VFSTOUFS(mp);
1909	LOCK_OWNED(ump);
1910	matchcnt = 0;
1911	sentinel.wk_mp = NULL;
1912	sentinel.wk_type = D_SENTINEL;
1913	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
1914	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
1915	    wk = LIST_NEXT(&sentinel, wk_list)) {
1916		if (wk->wk_type == D_SENTINEL) {
1917			LIST_REMOVE(&sentinel, wk_list);
1918			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
1919			continue;
1920		}
1921		if (wk->wk_state & INPROGRESS)
1922			panic("process_worklist_item: %p already in progress.",
1923			    wk);
1924		wk->wk_state |= INPROGRESS;
1925		remove_from_worklist(wk);
1926		FREE_LOCK(ump);
1927		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1928			panic("process_worklist_item: suspended filesystem");
1929		switch (wk->wk_type) {
1930		case D_DIRREM:
1931			/* removal of a directory entry */
1932			error = handle_workitem_remove(WK_DIRREM(wk), flags);
1933			break;
1934
1935		case D_FREEBLKS:
1936			/* releasing blocks and/or fragments from a file */
1937			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
1938			    flags);
1939			break;
1940
1941		case D_FREEFRAG:
1942			/* releasing a fragment when replaced as a file grows */
1943			handle_workitem_freefrag(WK_FREEFRAG(wk));
1944			error = 0;
1945			break;
1946
1947		case D_FREEFILE:
1948			/* releasing an inode when its link count drops to 0 */
1949			handle_workitem_freefile(WK_FREEFILE(wk));
1950			error = 0;
1951			break;
1952
1953		default:
1954			panic("%s_process_worklist: Unknown type %s",
1955			    "softdep", TYPENAME(wk->wk_type));
1956			/* NOTREACHED */
1957		}
1958		vn_finished_secondary_write(mp);
1959		ACQUIRE_LOCK(ump);
1960		if (error == 0) {
1961			if (++matchcnt == target)
1962				break;
1963			continue;
1964		}
1965		/*
1966		 * We have to retry the worklist item later.  Wake up any
1967		 * waiters who may be able to complete it immediately and
1968		 * add the item back to the head so we don't try to execute
1969		 * it again.
1970		 */
1971		wk->wk_state &= ~INPROGRESS;
1972		wake_worklist(wk);
1973		add_to_worklist(wk, WK_HEAD);
1974	}
1975	/* Sentinal could've become the tail from remove_from_worklist. */
1976	if (ump->softdep_worklist_tail == &sentinel)
1977		ump->softdep_worklist_tail =
1978		    (struct worklist *)sentinel.wk_list.le_prev;
1979	LIST_REMOVE(&sentinel, wk_list);
1980	PRELE(curproc);
1981	return (matchcnt);
1982}
1983
1984/*
1985 * Move dependencies from one buffer to another.
1986 */
1987int
1988softdep_move_dependencies(struct buf *oldbp, struct buf *newbp)
1989{
1990	struct worklist *wk, *wktail;
1991	struct ufsmount *ump;
1992	int dirty;
1993
1994	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
1995		return (0);
1996	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
1997	    ("softdep_move_dependencies called on non-softdep filesystem"));
1998	dirty = 0;
1999	wktail = NULL;
2000	ump = VFSTOUFS(wk->wk_mp);
2001	ACQUIRE_LOCK(ump);
2002	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
2003		LIST_REMOVE(wk, wk_list);
2004		if (wk->wk_type == D_BMSAFEMAP &&
2005		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
2006			dirty = 1;
2007		if (wktail == NULL)
2008			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
2009		else
2010			LIST_INSERT_AFTER(wktail, wk, wk_list);
2011		wktail = wk;
2012	}
2013	FREE_LOCK(ump);
2014
2015	return (dirty);
2016}
2017
2018/*
2019 * Purge the work list of all items associated with a particular mount point.
2020 */
2021int
2022softdep_flushworklist(struct mount *oldmnt,
2023	int *countp,
2024	struct thread *td)
2025{
2026	struct vnode *devvp;
2027	struct ufsmount *ump;
2028	int count, error;
2029
2030	/*
2031	 * Alternately flush the block device associated with the mount
2032	 * point and process any dependencies that the flushing
2033	 * creates. We continue until no more worklist dependencies
2034	 * are found.
2035	 */
2036	*countp = 0;
2037	error = 0;
2038	ump = VFSTOUFS(oldmnt);
2039	devvp = ump->um_devvp;
2040	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
2041		*countp += count;
2042		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
2043		error = VOP_FSYNC(devvp, MNT_WAIT, td);
2044		VOP_UNLOCK(devvp);
2045		if (error != 0)
2046			break;
2047	}
2048	return (error);
2049}
2050
2051#define	SU_WAITIDLE_RETRIES	20
2052static int
2053softdep_waitidle(struct mount *mp, int flags __unused)
2054{
2055	struct ufsmount *ump;
2056	struct vnode *devvp;
2057	struct thread *td;
2058	int error, i;
2059
2060	ump = VFSTOUFS(mp);
2061	KASSERT(ump->um_softdep != NULL,
2062	    ("softdep_waitidle called on non-softdep filesystem"));
2063	devvp = ump->um_devvp;
2064	td = curthread;
2065	error = 0;
2066	ACQUIRE_LOCK(ump);
2067	for (i = 0; i < SU_WAITIDLE_RETRIES && ump->softdep_deps != 0; i++) {
2068		ump->softdep_req = 1;
2069		KASSERT((flags & FORCECLOSE) == 0 ||
2070		    ump->softdep_on_worklist == 0,
2071		    ("softdep_waitidle: work added after flush"));
2072		msleep(&ump->softdep_deps, LOCK_PTR(ump), PVM | PDROP,
2073		    "softdeps", 10 * hz);
2074		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
2075		error = VOP_FSYNC(devvp, MNT_WAIT, td);
2076		VOP_UNLOCK(devvp);
2077		ACQUIRE_LOCK(ump);
2078		if (error != 0)
2079			break;
2080	}
2081	ump->softdep_req = 0;
2082	if (i == SU_WAITIDLE_RETRIES && error == 0 && ump->softdep_deps != 0) {
2083		error = EBUSY;
2084		printf("softdep_waitidle: Failed to flush worklist for %p\n",
2085		    mp);
2086	}
2087	FREE_LOCK(ump);
2088	return (error);
2089}
2090
2091/*
2092 * Flush all vnodes and worklist items associated with a specified mount point.
2093 */
2094int
2095softdep_flushfiles(struct mount *oldmnt,
2096	int flags,
2097	struct thread *td)
2098{
2099	struct ufsmount *ump __unused;
2100#ifdef QUOTA
2101	int i;
2102#endif
2103	int error, early, depcount, loopcnt, retry_flush_count, retry;
2104	int morework;
2105
2106	ump = VFSTOUFS(oldmnt);
2107	KASSERT(ump->um_softdep != NULL,
2108	    ("softdep_flushfiles called on non-softdep filesystem"));
2109	loopcnt = 10;
2110	retry_flush_count = 3;
2111retry_flush:
2112	error = 0;
2113
2114	/*
2115	 * Alternately flush the vnodes associated with the mount
2116	 * point and process any dependencies that the flushing
2117	 * creates. In theory, this loop can happen at most twice,
2118	 * but we give it a few extra just to be sure.
2119	 */
2120	for (; loopcnt > 0; loopcnt--) {
2121		/*
2122		 * Do another flush in case any vnodes were brought in
2123		 * as part of the cleanup operations.
2124		 */
2125		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
2126		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
2127		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
2128			break;
2129		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
2130		    depcount == 0)
2131			break;
2132	}
2133	/*
2134	 * If we are unmounting then it is an error to fail. If we
2135	 * are simply trying to downgrade to read-only, then filesystem
2136	 * activity can keep us busy forever, so we just fail with EBUSY.
2137	 */
2138	if (loopcnt == 0) {
2139		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
2140			panic("softdep_flushfiles: looping");
2141		error = EBUSY;
2142	}
2143	if (!error)
2144		error = softdep_waitidle(oldmnt, flags);
2145	if (!error) {
2146		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
2147			retry = 0;
2148			MNT_ILOCK(oldmnt);
2149			morework = oldmnt->mnt_nvnodelistsize > 0;
2150#ifdef QUOTA
2151			UFS_LOCK(ump);
2152			for (i = 0; i < MAXQUOTAS; i++) {
2153				if (ump->um_quotas[i] != NULLVP)
2154					morework = 1;
2155			}
2156			UFS_UNLOCK(ump);
2157#endif
2158			if (morework) {
2159				if (--retry_flush_count > 0) {
2160					retry = 1;
2161					loopcnt = 3;
2162				} else
2163					error = EBUSY;
2164			}
2165			MNT_IUNLOCK(oldmnt);
2166			if (retry)
2167				goto retry_flush;
2168		}
2169	}
2170	return (error);
2171}
2172
2173/*
2174 * Structure hashing.
2175 *
2176 * There are four types of structures that can be looked up:
2177 *	1) pagedep structures identified by mount point, inode number,
2178 *	   and logical block.
2179 *	2) inodedep structures identified by mount point and inode number.
2180 *	3) newblk structures identified by mount point and
2181 *	   physical block number.
2182 *	4) bmsafemap structures identified by mount point and
2183 *	   cylinder group number.
2184 *
2185 * The "pagedep" and "inodedep" dependency structures are hashed
2186 * separately from the file blocks and inodes to which they correspond.
2187 * This separation helps when the in-memory copy of an inode or
2188 * file block must be replaced. It also obviates the need to access
2189 * an inode or file page when simply updating (or de-allocating)
2190 * dependency structures. Lookup of newblk structures is needed to
2191 * find newly allocated blocks when trying to associate them with
2192 * their allocdirect or allocindir structure.
2193 *
2194 * The lookup routines optionally create and hash a new instance when
2195 * an existing entry is not found. The bmsafemap lookup routine always
2196 * allocates a new structure if an existing one is not found.
2197 */
2198#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
2199
2200/*
2201 * Structures and routines associated with pagedep caching.
2202 */
2203#define	PAGEDEP_HASH(ump, inum, lbn) \
2204	(&(ump)->pagedep_hashtbl[((inum) + (lbn)) & (ump)->pagedep_hash_size])
2205
2206static int
2207pagedep_find(struct pagedep_hashhead *pagedephd,
2208	ino_t ino,
2209	ufs_lbn_t lbn,
2210	struct pagedep **pagedeppp)
2211{
2212	struct pagedep *pagedep;
2213
2214	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2215		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn) {
2216			*pagedeppp = pagedep;
2217			return (1);
2218		}
2219	}
2220	*pagedeppp = NULL;
2221	return (0);
2222}
2223/*
2224 * Look up a pagedep. Return 1 if found, 0 otherwise.
2225 * If not found, allocate if DEPALLOC flag is passed.
2226 * Found or allocated entry is returned in pagedeppp.
2227 */
2228static int
2229pagedep_lookup(struct mount *mp,
2230	struct buf *bp,
2231	ino_t ino,
2232	ufs_lbn_t lbn,
2233	int flags,
2234	struct pagedep **pagedeppp)
2235{
2236	struct pagedep *pagedep;
2237	struct pagedep_hashhead *pagedephd;
2238	struct worklist *wk;
2239	struct ufsmount *ump;
2240	int ret;
2241	int i;
2242
2243	ump = VFSTOUFS(mp);
2244	LOCK_OWNED(ump);
2245	if (bp) {
2246		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2247			if (wk->wk_type == D_PAGEDEP) {
2248				*pagedeppp = WK_PAGEDEP(wk);
2249				return (1);
2250			}
2251		}
2252	}
2253	pagedephd = PAGEDEP_HASH(ump, ino, lbn);
2254	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2255	if (ret) {
2256		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2257			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2258		return (1);
2259	}
2260	if ((flags & DEPALLOC) == 0)
2261		return (0);
2262	FREE_LOCK(ump);
2263	pagedep = malloc(sizeof(struct pagedep),
2264	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2265	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2266	ACQUIRE_LOCK(ump);
2267	ret = pagedep_find(pagedephd, ino, lbn, pagedeppp);
2268	if (*pagedeppp) {
2269		/*
2270		 * This should never happen since we only create pagedeps
2271		 * with the vnode lock held.  Could be an assert.
2272		 */
2273		WORKITEM_FREE(pagedep, D_PAGEDEP);
2274		return (ret);
2275	}
2276	pagedep->pd_ino = ino;
2277	pagedep->pd_lbn = lbn;
2278	LIST_INIT(&pagedep->pd_dirremhd);
2279	LIST_INIT(&pagedep->pd_pendinghd);
2280	for (i = 0; i < DAHASHSZ; i++)
2281		LIST_INIT(&pagedep->pd_diraddhd[i]);
2282	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2283	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2284	*pagedeppp = pagedep;
2285	return (0);
2286}
2287
2288/*
2289 * Structures and routines associated with inodedep caching.
2290 */
2291#define	INODEDEP_HASH(ump, inum) \
2292      (&(ump)->inodedep_hashtbl[(inum) & (ump)->inodedep_hash_size])
2293
2294static int
2295inodedep_find(struct inodedep_hashhead *inodedephd,
2296	ino_t inum,
2297	struct inodedep **inodedeppp)
2298{
2299	struct inodedep *inodedep;
2300
2301	LIST_FOREACH(inodedep, inodedephd, id_hash)
2302		if (inum == inodedep->id_ino)
2303			break;
2304	if (inodedep) {
2305		*inodedeppp = inodedep;
2306		return (1);
2307	}
2308	*inodedeppp = NULL;
2309
2310	return (0);
2311}
2312/*
2313 * Look up an inodedep. Return 1 if found, 0 if not found.
2314 * If not found, allocate if DEPALLOC flag is passed.
2315 * Found or allocated entry is returned in inodedeppp.
2316 */
2317static int
2318inodedep_lookup(struct mount *mp,
2319	ino_t inum,
2320	int flags,
2321	struct inodedep **inodedeppp)
2322{
2323	struct inodedep *inodedep;
2324	struct inodedep_hashhead *inodedephd;
2325	struct ufsmount *ump;
2326	struct fs *fs;
2327
2328	ump = VFSTOUFS(mp);
2329	LOCK_OWNED(ump);
2330	fs = ump->um_fs;
2331	inodedephd = INODEDEP_HASH(ump, inum);
2332
2333	if (inodedep_find(inodedephd, inum, inodedeppp))
2334		return (1);
2335	if ((flags & DEPALLOC) == 0)
2336		return (0);
2337	/*
2338	 * If the system is over its limit and our filesystem is
2339	 * responsible for more than our share of that usage and
2340	 * we are not in a rush, request some inodedep cleanup.
2341	 */
2342	if (softdep_excess_items(ump, D_INODEDEP))
2343		schedule_cleanup(mp);
2344	else
2345		FREE_LOCK(ump);
2346	inodedep = malloc(sizeof(struct inodedep),
2347		M_INODEDEP, M_SOFTDEP_FLAGS);
2348	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2349	ACQUIRE_LOCK(ump);
2350	if (inodedep_find(inodedephd, inum, inodedeppp)) {
2351		WORKITEM_FREE(inodedep, D_INODEDEP);
2352		return (1);
2353	}
2354	inodedep->id_fs = fs;
2355	inodedep->id_ino = inum;
2356	inodedep->id_state = ALLCOMPLETE;
2357	inodedep->id_nlinkdelta = 0;
2358	inodedep->id_nlinkwrote = -1;
2359	inodedep->id_savedino1 = NULL;
2360	inodedep->id_savedsize = -1;
2361	inodedep->id_savedextsize = -1;
2362	inodedep->id_savednlink = -1;
2363	inodedep->id_bmsafemap = NULL;
2364	inodedep->id_mkdiradd = NULL;
2365	LIST_INIT(&inodedep->id_dirremhd);
2366	LIST_INIT(&inodedep->id_pendinghd);
2367	LIST_INIT(&inodedep->id_inowait);
2368	LIST_INIT(&inodedep->id_bufwait);
2369	TAILQ_INIT(&inodedep->id_inoreflst);
2370	TAILQ_INIT(&inodedep->id_inoupdt);
2371	TAILQ_INIT(&inodedep->id_newinoupdt);
2372	TAILQ_INIT(&inodedep->id_extupdt);
2373	TAILQ_INIT(&inodedep->id_newextupdt);
2374	TAILQ_INIT(&inodedep->id_freeblklst);
2375	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2376	*inodedeppp = inodedep;
2377	return (0);
2378}
2379
2380/*
2381 * Structures and routines associated with newblk caching.
2382 */
2383#define	NEWBLK_HASH(ump, inum) \
2384	(&(ump)->newblk_hashtbl[(inum) & (ump)->newblk_hash_size])
2385
2386static int
2387newblk_find(struct newblk_hashhead *newblkhd,
2388	ufs2_daddr_t newblkno,
2389	int flags,
2390	struct newblk **newblkpp)
2391{
2392	struct newblk *newblk;
2393
2394	LIST_FOREACH(newblk, newblkhd, nb_hash) {
2395		if (newblkno != newblk->nb_newblkno)
2396			continue;
2397		/*
2398		 * If we're creating a new dependency don't match those that
2399		 * have already been converted to allocdirects.  This is for
2400		 * a frag extend.
2401		 */
2402		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2403			continue;
2404		break;
2405	}
2406	if (newblk) {
2407		*newblkpp = newblk;
2408		return (1);
2409	}
2410	*newblkpp = NULL;
2411	return (0);
2412}
2413
2414/*
2415 * Look up a newblk. Return 1 if found, 0 if not found.
2416 * If not found, allocate if DEPALLOC flag is passed.
2417 * Found or allocated entry is returned in newblkpp.
2418 */
2419static int
2420newblk_lookup(struct mount *mp,
2421	ufs2_daddr_t newblkno,
2422	int flags,
2423	struct newblk **newblkpp)
2424{
2425	struct newblk *newblk;
2426	struct newblk_hashhead *newblkhd;
2427	struct ufsmount *ump;
2428
2429	ump = VFSTOUFS(mp);
2430	LOCK_OWNED(ump);
2431	newblkhd = NEWBLK_HASH(ump, newblkno);
2432	if (newblk_find(newblkhd, newblkno, flags, newblkpp))
2433		return (1);
2434	if ((flags & DEPALLOC) == 0)
2435		return (0);
2436	if (softdep_excess_items(ump, D_NEWBLK) ||
2437	    softdep_excess_items(ump, D_ALLOCDIRECT) ||
2438	    softdep_excess_items(ump, D_ALLOCINDIR))
2439		schedule_cleanup(mp);
2440	else
2441		FREE_LOCK(ump);
2442	newblk = malloc(sizeof(union allblk), M_NEWBLK,
2443	    M_SOFTDEP_FLAGS | M_ZERO);
2444	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2445	ACQUIRE_LOCK(ump);
2446	if (newblk_find(newblkhd, newblkno, flags, newblkpp)) {
2447		WORKITEM_FREE(newblk, D_NEWBLK);
2448		return (1);
2449	}
2450	newblk->nb_freefrag = NULL;
2451	LIST_INIT(&newblk->nb_indirdeps);
2452	LIST_INIT(&newblk->nb_newdirblk);
2453	LIST_INIT(&newblk->nb_jwork);
2454	newblk->nb_state = ATTACHED;
2455	newblk->nb_newblkno = newblkno;
2456	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2457	*newblkpp = newblk;
2458	return (0);
2459}
2460
2461/*
2462 * Structures and routines associated with freed indirect block caching.
2463 */
2464#define	INDIR_HASH(ump, blkno) \
2465	(&(ump)->indir_hashtbl[(blkno) & (ump)->indir_hash_size])
2466
2467/*
2468 * Lookup an indirect block in the indir hash table.  The freework is
2469 * removed and potentially freed.  The caller must do a blocking journal
2470 * write before writing to the blkno.
2471 */
2472static int
2473indirblk_lookup(struct mount *mp, ufs2_daddr_t blkno)
2474{
2475	struct freework *freework;
2476	struct indir_hashhead *wkhd;
2477	struct ufsmount *ump;
2478
2479	ump = VFSTOUFS(mp);
2480	wkhd = INDIR_HASH(ump, blkno);
2481	TAILQ_FOREACH(freework, wkhd, fw_next) {
2482		if (freework->fw_blkno != blkno)
2483			continue;
2484		indirblk_remove(freework);
2485		return (1);
2486	}
2487	return (0);
2488}
2489
2490/*
2491 * Insert an indirect block represented by freework into the indirblk
2492 * hash table so that it may prevent the block from being re-used prior
2493 * to the journal being written.
2494 */
2495static void
2496indirblk_insert(struct freework *freework)
2497{
2498	struct jblocks *jblocks;
2499	struct jseg *jseg;
2500	struct ufsmount *ump;
2501
2502	ump = VFSTOUFS(freework->fw_list.wk_mp);
2503	jblocks = ump->softdep_jblocks;
2504	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2505	if (jseg == NULL)
2506		return;
2507
2508	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2509	TAILQ_INSERT_HEAD(INDIR_HASH(ump, freework->fw_blkno), freework,
2510	    fw_next);
2511	freework->fw_state &= ~DEPCOMPLETE;
2512}
2513
2514static void
2515indirblk_remove(struct freework *freework)
2516{
2517	struct ufsmount *ump;
2518
2519	ump = VFSTOUFS(freework->fw_list.wk_mp);
2520	LIST_REMOVE(freework, fw_segs);
2521	TAILQ_REMOVE(INDIR_HASH(ump, freework->fw_blkno), freework, fw_next);
2522	freework->fw_state |= DEPCOMPLETE;
2523	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2524		WORKITEM_FREE(freework, D_FREEWORK);
2525}
2526
2527/*
2528 * Executed during filesystem system initialization before
2529 * mounting any filesystems.
2530 */
2531void
2532softdep_initialize(void)
2533{
2534
2535	TAILQ_INIT(&softdepmounts);
2536#ifdef __LP64__
2537	max_softdeps = desiredvnodes * 4;
2538#else
2539	max_softdeps = desiredvnodes * 2;
2540#endif
2541
2542	/* initialise bioops hack */
2543	bioops.io_start = softdep_disk_io_initiation;
2544	bioops.io_complete = softdep_disk_write_complete;
2545	bioops.io_deallocate = softdep_deallocate_dependencies;
2546	bioops.io_countdeps = softdep_count_dependencies;
2547	ast_register(TDA_UFS, ASTR_KCLEAR | ASTR_ASTF_REQUIRED, 0,
2548	    softdep_ast_cleanup_proc);
2549
2550	/* Initialize the callout with an mtx. */
2551	callout_init_mtx(&softdep_callout, &lk, 0);
2552}
2553
2554/*
2555 * Executed after all filesystems have been unmounted during
2556 * filesystem module unload.
2557 */
2558void
2559softdep_uninitialize(void)
2560{
2561
2562	/* clear bioops hack */
2563	bioops.io_start = NULL;
2564	bioops.io_complete = NULL;
2565	bioops.io_deallocate = NULL;
2566	bioops.io_countdeps = NULL;
2567	ast_deregister(TDA_UFS);
2568
2569	callout_drain(&softdep_callout);
2570}
2571
2572/*
2573 * Called at mount time to notify the dependency code that a
2574 * filesystem wishes to use it.
2575 */
2576int
2577softdep_mount(struct vnode *devvp,
2578	struct mount *mp,
2579	struct fs *fs,
2580	struct ucred *cred)
2581{
2582	struct csum_total cstotal;
2583	struct mount_softdeps *sdp;
2584	struct ufsmount *ump;
2585	struct cg *cgp;
2586	struct buf *bp;
2587	uint64_t cyl, i;
2588	int error;
2589
2590	ump = VFSTOUFS(mp);
2591
2592	sdp = malloc(sizeof(struct mount_softdeps), M_MOUNTDATA,
2593	    M_WAITOK | M_ZERO);
2594	rw_init(&sdp->sd_fslock, "SUrw");
2595	sdp->sd_ump = ump;
2596	LIST_INIT(&sdp->sd_workitem_pending);
2597	LIST_INIT(&sdp->sd_journal_pending);
2598	TAILQ_INIT(&sdp->sd_unlinked);
2599	LIST_INIT(&sdp->sd_dirtycg);
2600	sdp->sd_worklist_tail = NULL;
2601	sdp->sd_on_worklist = 0;
2602	sdp->sd_deps = 0;
2603	LIST_INIT(&sdp->sd_mkdirlisthd);
2604	sdp->sd_pdhash = hashinit(desiredvnodes / 5, M_PAGEDEP,
2605	    &sdp->sd_pdhashsize);
2606	sdp->sd_pdnextclean = 0;
2607	sdp->sd_idhash = hashinit(desiredvnodes, M_INODEDEP,
2608	    &sdp->sd_idhashsize);
2609	sdp->sd_idnextclean = 0;
2610	sdp->sd_newblkhash = hashinit(max_softdeps / 2,  M_NEWBLK,
2611	    &sdp->sd_newblkhashsize);
2612	sdp->sd_bmhash = hashinit(1024, M_BMSAFEMAP, &sdp->sd_bmhashsize);
2613	i = 1 << (ffs(desiredvnodes / 10) - 1);
2614	sdp->sd_indirhash = malloc(i * sizeof(struct indir_hashhead),
2615	    M_FREEWORK, M_WAITOK);
2616	sdp->sd_indirhashsize = i - 1;
2617	for (i = 0; i <= sdp->sd_indirhashsize; i++)
2618		TAILQ_INIT(&sdp->sd_indirhash[i]);
2619	for (i = 0; i <= D_LAST; i++)
2620		LIST_INIT(&sdp->sd_alldeps[i]);
2621	ACQUIRE_GBLLOCK(&lk);
2622	TAILQ_INSERT_TAIL(&softdepmounts, sdp, sd_next);
2623	FREE_GBLLOCK(&lk);
2624
2625	ump->um_softdep = sdp;
2626	MNT_ILOCK(mp);
2627	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2628	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2629		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2630		    MNTK_SOFTDEP | MNTK_NOASYNC;
2631	}
2632	MNT_IUNLOCK(mp);
2633
2634	if ((fs->fs_flags & FS_SUJ) &&
2635	    (error = journal_mount(mp, fs, cred)) != 0) {
2636		printf("Failed to start journal: %d\n", error);
2637		softdep_unmount(mp);
2638		return (error);
2639	}
2640	/*
2641	 * Start our flushing thread in the bufdaemon process.
2642	 */
2643	ACQUIRE_LOCK(ump);
2644	ump->softdep_flags |= FLUSH_STARTING;
2645	FREE_LOCK(ump);
2646	kproc_kthread_add(&softdep_flush, mp, &bufdaemonproc,
2647	    &ump->softdep_flushtd, 0, 0, "softdepflush", "%s worker",
2648	    mp->mnt_stat.f_mntonname);
2649	ACQUIRE_LOCK(ump);
2650	while ((ump->softdep_flags & FLUSH_STARTING) != 0) {
2651		msleep(&ump->softdep_flushtd, LOCK_PTR(ump), PVM, "sdstart",
2652		    hz / 2);
2653	}
2654	FREE_LOCK(ump);
2655	/*
2656	 * When doing soft updates, the counters in the
2657	 * superblock may have gotten out of sync. Recomputation
2658	 * can take a long time and can be deferred for background
2659	 * fsck.  However, the old behavior of scanning the cylinder
2660	 * groups and recalculating them at mount time is available
2661	 * by setting vfs.ffs.compute_summary_at_mount to one.
2662	 */
2663	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2664		return (0);
2665	bzero(&cstotal, sizeof cstotal);
2666	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2667		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2668		    fs->fs_cgsize, cred, &bp)) != 0) {
2669			brelse(bp);
2670			softdep_unmount(mp);
2671			return (error);
2672		}
2673		cgp = (struct cg *)bp->b_data;
2674		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2675		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2676		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2677		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2678		fs->fs_cs(fs, cyl) = cgp->cg_cs;
2679		brelse(bp);
2680	}
2681#ifdef INVARIANTS
2682	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2683		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2684#endif
2685	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2686	return (0);
2687}
2688
2689void
2690softdep_unmount(struct mount *mp)
2691{
2692	struct ufsmount *ump;
2693	struct mount_softdeps *ums;
2694
2695	ump = VFSTOUFS(mp);
2696	KASSERT(ump->um_softdep != NULL,
2697	    ("softdep_unmount called on non-softdep filesystem"));
2698	MNT_ILOCK(mp);
2699	mp->mnt_flag &= ~MNT_SOFTDEP;
2700	if ((mp->mnt_flag & MNT_SUJ) == 0) {
2701		MNT_IUNLOCK(mp);
2702	} else {
2703		mp->mnt_flag &= ~MNT_SUJ;
2704		MNT_IUNLOCK(mp);
2705		journal_unmount(ump);
2706	}
2707	/*
2708	 * Shut down our flushing thread. Check for NULL is if
2709	 * softdep_mount errors out before the thread has been created.
2710	 */
2711	if (ump->softdep_flushtd != NULL) {
2712		ACQUIRE_LOCK(ump);
2713		ump->softdep_flags |= FLUSH_EXIT;
2714		wakeup(&ump->softdep_flushtd);
2715		while ((ump->softdep_flags & FLUSH_EXIT) != 0) {
2716			msleep(&ump->softdep_flags, LOCK_PTR(ump), PVM,
2717			    "sdwait", 0);
2718		}
2719		KASSERT((ump->softdep_flags & FLUSH_EXIT) == 0,
2720		    ("Thread shutdown failed"));
2721		FREE_LOCK(ump);
2722	}
2723
2724	/*
2725	 * We are no longer have softdep structure attached to ump.
2726	 */
2727	ums = ump->um_softdep;
2728	ACQUIRE_GBLLOCK(&lk);
2729	TAILQ_REMOVE(&softdepmounts, ums, sd_next);
2730	FREE_GBLLOCK(&lk);
2731	ump->um_softdep = NULL;
2732
2733	KASSERT(ums->sd_on_journal == 0,
2734	    ("ump %p ums %p on_journal %d", ump, ums, ums->sd_on_journal));
2735	KASSERT(ums->sd_on_worklist == 0,
2736	    ("ump %p ums %p on_worklist %d", ump, ums, ums->sd_on_worklist));
2737	KASSERT(ums->sd_deps == 0,
2738	    ("ump %p ums %p deps %d", ump, ums, ums->sd_deps));
2739
2740	/*
2741	 * Free up our resources.
2742	 */
2743	rw_destroy(&ums->sd_fslock);
2744	hashdestroy(ums->sd_pdhash, M_PAGEDEP, ums->sd_pdhashsize);
2745	hashdestroy(ums->sd_idhash, M_INODEDEP, ums->sd_idhashsize);
2746	hashdestroy(ums->sd_newblkhash, M_NEWBLK, ums->sd_newblkhashsize);
2747	hashdestroy(ums->sd_bmhash, M_BMSAFEMAP, ums->sd_bmhashsize);
2748	free(ums->sd_indirhash, M_FREEWORK);
2749#ifdef INVARIANTS
2750	for (int i = 0; i <= D_LAST; i++) {
2751		KASSERT(ums->sd_curdeps[i] == 0,
2752		    ("Unmount %s: Dep type %s != 0 (%jd)", ump->um_fs->fs_fsmnt,
2753		    TYPENAME(i), (intmax_t)ums->sd_curdeps[i]));
2754		KASSERT(LIST_EMPTY(&ums->sd_alldeps[i]),
2755		    ("Unmount %s: Dep type %s not empty (%p)",
2756		    ump->um_fs->fs_fsmnt,
2757		    TYPENAME(i), LIST_FIRST(&ums->sd_alldeps[i])));
2758	}
2759#endif
2760	free(ums, M_MOUNTDATA);
2761}
2762
2763static struct jblocks *
2764jblocks_create(void)
2765{
2766	struct jblocks *jblocks;
2767
2768	jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2769	TAILQ_INIT(&jblocks->jb_segs);
2770	jblocks->jb_avail = 10;
2771	jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2772	    M_JBLOCKS, M_WAITOK | M_ZERO);
2773
2774	return (jblocks);
2775}
2776
2777static ufs2_daddr_t
2778jblocks_alloc(struct jblocks *jblocks,
2779	int bytes,
2780	int *actual)
2781{
2782	ufs2_daddr_t daddr;
2783	struct jextent *jext;
2784	int freecnt;
2785	int blocks;
2786
2787	blocks = bytes / DEV_BSIZE;
2788	jext = &jblocks->jb_extent[jblocks->jb_head];
2789	freecnt = jext->je_blocks - jblocks->jb_off;
2790	if (freecnt == 0) {
2791		jblocks->jb_off = 0;
2792		if (++jblocks->jb_head > jblocks->jb_used)
2793			jblocks->jb_head = 0;
2794		jext = &jblocks->jb_extent[jblocks->jb_head];
2795		freecnt = jext->je_blocks;
2796	}
2797	if (freecnt > blocks)
2798		freecnt = blocks;
2799	*actual = freecnt * DEV_BSIZE;
2800	daddr = jext->je_daddr + jblocks->jb_off;
2801	jblocks->jb_off += freecnt;
2802	jblocks->jb_free -= freecnt;
2803
2804	return (daddr);
2805}
2806
2807static void
2808jblocks_free(struct jblocks *jblocks,
2809	struct mount *mp,
2810	int bytes)
2811{
2812
2813	LOCK_OWNED(VFSTOUFS(mp));
2814	jblocks->jb_free += bytes / DEV_BSIZE;
2815	if (jblocks->jb_suspended)
2816		worklist_speedup(mp);
2817	wakeup(jblocks);
2818}
2819
2820static void
2821jblocks_destroy(struct jblocks *jblocks)
2822{
2823
2824	if (jblocks->jb_extent)
2825		free(jblocks->jb_extent, M_JBLOCKS);
2826	free(jblocks, M_JBLOCKS);
2827}
2828
2829static void
2830jblocks_add(struct jblocks *jblocks,
2831	ufs2_daddr_t daddr,
2832	int blocks)
2833{
2834	struct jextent *jext;
2835
2836	jblocks->jb_blocks += blocks;
2837	jblocks->jb_free += blocks;
2838	jext = &jblocks->jb_extent[jblocks->jb_used];
2839	/* Adding the first block. */
2840	if (jext->je_daddr == 0) {
2841		jext->je_daddr = daddr;
2842		jext->je_blocks = blocks;
2843		return;
2844	}
2845	/* Extending the last extent. */
2846	if (jext->je_daddr + jext->je_blocks == daddr) {
2847		jext->je_blocks += blocks;
2848		return;
2849	}
2850	/* Adding a new extent. */
2851	if (++jblocks->jb_used == jblocks->jb_avail) {
2852		jblocks->jb_avail *= 2;
2853		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2854		    M_JBLOCKS, M_WAITOK | M_ZERO);
2855		memcpy(jext, jblocks->jb_extent,
2856		    sizeof(struct jextent) * jblocks->jb_used);
2857		free(jblocks->jb_extent, M_JBLOCKS);
2858		jblocks->jb_extent = jext;
2859	}
2860	jext = &jblocks->jb_extent[jblocks->jb_used];
2861	jext->je_daddr = daddr;
2862	jext->je_blocks = blocks;
2863	return;
2864}
2865
2866int
2867softdep_journal_lookup(struct mount *mp, struct vnode **vpp)
2868{
2869	struct componentname cnp;
2870	struct vnode *dvp;
2871	ino_t sujournal;
2872	int error;
2873
2874	error = VFS_VGET(mp, UFS_ROOTINO, LK_EXCLUSIVE, &dvp);
2875	if (error)
2876		return (error);
2877	bzero(&cnp, sizeof(cnp));
2878	cnp.cn_nameiop = LOOKUP;
2879	cnp.cn_flags = ISLASTCN;
2880	cnp.cn_cred = curthread->td_ucred;
2881	cnp.cn_pnbuf = SUJ_FILE;
2882	cnp.cn_nameptr = SUJ_FILE;
2883	cnp.cn_namelen = strlen(SUJ_FILE);
2884	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2885	vput(dvp);
2886	if (error != 0)
2887		return (error);
2888	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2889	return (error);
2890}
2891
2892/*
2893 * Open and verify the journal file.
2894 */
2895static int
2896journal_mount(struct mount *mp,
2897	struct fs *fs,
2898	struct ucred *cred)
2899{
2900	struct jblocks *jblocks;
2901	struct ufsmount *ump;
2902	struct vnode *vp;
2903	struct inode *ip;
2904	ufs2_daddr_t blkno;
2905	int bcount;
2906	int error;
2907	int i;
2908
2909	ump = VFSTOUFS(mp);
2910	ump->softdep_journal_tail = NULL;
2911	ump->softdep_on_journal = 0;
2912	ump->softdep_accdeps = 0;
2913	ump->softdep_req = 0;
2914	ump->softdep_jblocks = NULL;
2915	error = softdep_journal_lookup(mp, &vp);
2916	if (error != 0) {
2917		printf("Failed to find journal.  Use tunefs to create one\n");
2918		return (error);
2919	}
2920	ip = VTOI(vp);
2921	if (ip->i_size < SUJ_MIN) {
2922		error = ENOSPC;
2923		goto out;
2924	}
2925	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
2926	jblocks = jblocks_create();
2927	for (i = 0; i < bcount; i++) {
2928		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2929		if (error)
2930			break;
2931		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2932	}
2933	if (error) {
2934		jblocks_destroy(jblocks);
2935		goto out;
2936	}
2937	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
2938	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2939	ump->softdep_jblocks = jblocks;
2940
2941	MNT_ILOCK(mp);
2942	mp->mnt_flag |= MNT_SUJ;
2943	MNT_IUNLOCK(mp);
2944
2945	/*
2946	 * Only validate the journal contents if the
2947	 * filesystem is clean, otherwise we write the logs
2948	 * but they'll never be used.  If the filesystem was
2949	 * still dirty when we mounted it the journal is
2950	 * invalid and a new journal can only be valid if it
2951	 * starts from a clean mount.
2952	 */
2953	if (fs->fs_clean) {
2954		DIP_SET(ip, i_modrev, fs->fs_mtime);
2955		ip->i_flags |= IN_MODIFIED;
2956		ffs_update(vp, 1);
2957	}
2958out:
2959	vput(vp);
2960	return (error);
2961}
2962
2963static void
2964journal_unmount(struct ufsmount *ump)
2965{
2966
2967	if (ump->softdep_jblocks)
2968		jblocks_destroy(ump->softdep_jblocks);
2969	ump->softdep_jblocks = NULL;
2970}
2971
2972/*
2973 * Called when a journal record is ready to be written.  Space is allocated
2974 * and the journal entry is created when the journal is flushed to stable
2975 * store.
2976 */
2977static void
2978add_to_journal(struct worklist *wk)
2979{
2980	struct ufsmount *ump;
2981
2982	ump = VFSTOUFS(wk->wk_mp);
2983	LOCK_OWNED(ump);
2984	if (wk->wk_state & ONWORKLIST)
2985		panic("add_to_journal: %s(0x%X) already on list",
2986		    TYPENAME(wk->wk_type), wk->wk_state);
2987	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2988	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2989		ump->softdep_jblocks->jb_age = ticks;
2990		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2991	} else
2992		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2993	ump->softdep_journal_tail = wk;
2994	ump->softdep_on_journal += 1;
2995}
2996
2997/*
2998 * Remove an arbitrary item for the journal worklist maintain the tail
2999 * pointer.  This happens when a new operation obviates the need to
3000 * journal an old operation.
3001 */
3002static void
3003remove_from_journal(struct worklist *wk)
3004{
3005	struct ufsmount *ump;
3006
3007	ump = VFSTOUFS(wk->wk_mp);
3008	LOCK_OWNED(ump);
3009#ifdef INVARIANTS
3010	{
3011		struct worklist *wkn;
3012
3013		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
3014			if (wkn == wk)
3015				break;
3016		if (wkn == NULL)
3017			panic("remove_from_journal: %p is not in journal", wk);
3018	}
3019#endif
3020	/*
3021	 * We emulate a TAILQ to save space in most structures which do not
3022	 * require TAILQ semantics.  Here we must update the tail position
3023	 * when removing the tail which is not the final entry. This works
3024	 * only if the worklist linkage are at the beginning of the structure.
3025	 */
3026	if (ump->softdep_journal_tail == wk)
3027		ump->softdep_journal_tail =
3028		    (struct worklist *)wk->wk_list.le_prev;
3029	WORKLIST_REMOVE(wk);
3030	ump->softdep_on_journal -= 1;
3031}
3032
3033/*
3034 * Check for journal space as well as dependency limits so the prelink
3035 * code can throttle both journaled and non-journaled filesystems.
3036 * Threshold is 0 for low and 1 for min.
3037 */
3038static int
3039journal_space(struct ufsmount *ump, int thresh)
3040{
3041	struct jblocks *jblocks;
3042	int limit, avail;
3043
3044	jblocks = ump->softdep_jblocks;
3045	if (jblocks == NULL)
3046		return (1);
3047	/*
3048	 * We use a tighter restriction here to prevent request_cleanup()
3049	 * running in threads from running into locks we currently hold.
3050	 * We have to be over the limit and our filesystem has to be
3051	 * responsible for more than our share of that usage.
3052	 */
3053	limit = (max_softdeps / 10) * 9;
3054	if (dep_current[D_INODEDEP] > limit &&
3055	    ump->softdep_curdeps[D_INODEDEP] > limit / stat_flush_threads)
3056		return (0);
3057	if (thresh)
3058		thresh = jblocks->jb_min;
3059	else
3060		thresh = jblocks->jb_low;
3061	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
3062	avail = jblocks->jb_free - avail;
3063
3064	return (avail > thresh);
3065}
3066
3067static void
3068journal_suspend(struct ufsmount *ump)
3069{
3070	struct jblocks *jblocks;
3071	struct mount *mp;
3072	bool set;
3073
3074	mp = UFSTOVFS(ump);
3075	if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0)
3076		return;
3077
3078	jblocks = ump->softdep_jblocks;
3079	vfs_op_enter(mp);
3080	set = false;
3081	MNT_ILOCK(mp);
3082	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
3083		stat_journal_min++;
3084		mp->mnt_kern_flag |= MNTK_SUSPEND;
3085		mp->mnt_susp_owner = ump->softdep_flushtd;
3086		set = true;
3087	}
3088	jblocks->jb_suspended = 1;
3089	MNT_IUNLOCK(mp);
3090	if (!set)
3091		vfs_op_exit(mp);
3092}
3093
3094static int
3095journal_unsuspend(struct ufsmount *ump)
3096{
3097	struct jblocks *jblocks;
3098	struct mount *mp;
3099
3100	mp = UFSTOVFS(ump);
3101	jblocks = ump->softdep_jblocks;
3102
3103	if (jblocks != NULL && jblocks->jb_suspended &&
3104	    journal_space(ump, jblocks->jb_min)) {
3105		jblocks->jb_suspended = 0;
3106		FREE_LOCK(ump);
3107		mp->mnt_susp_owner = curthread;
3108		vfs_write_resume(mp, 0);
3109		ACQUIRE_LOCK(ump);
3110		return (1);
3111	}
3112	return (0);
3113}
3114
3115static void
3116journal_check_space(struct ufsmount *ump)
3117{
3118	struct mount *mp;
3119
3120	LOCK_OWNED(ump);
3121
3122	if (journal_space(ump, 0) == 0) {
3123		softdep_speedup(ump);
3124		mp = UFSTOVFS(ump);
3125		FREE_LOCK(ump);
3126		VFS_SYNC(mp, MNT_NOWAIT);
3127		ffs_sbupdate(ump, MNT_WAIT, 0);
3128		ACQUIRE_LOCK(ump);
3129		if (journal_space(ump, 1) == 0)
3130			journal_suspend(ump);
3131	}
3132}
3133
3134/*
3135 * Called before any allocation function to be certain that there is
3136 * sufficient space in the journal prior to creating any new records.
3137 * Since in the case of block allocation we may have multiple locked
3138 * buffers at the time of the actual allocation we can not block
3139 * when the journal records are created.  Doing so would create a deadlock
3140 * if any of these buffers needed to be flushed to reclaim space.  Instead
3141 * we require a sufficiently large amount of available space such that
3142 * each thread in the system could have passed this allocation check and
3143 * still have sufficient free space.  With 20% of a minimum journal size
3144 * of 1MB we have 6553 records available.
3145 */
3146int
3147softdep_prealloc(struct vnode *vp, int waitok)
3148{
3149	struct ufsmount *ump;
3150
3151	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
3152	    ("softdep_prealloc called on non-softdep filesystem"));
3153	/*
3154	 * Nothing to do if we are not running journaled soft updates.
3155	 * If we currently hold the snapshot lock, we must avoid
3156	 * handling other resources that could cause deadlock.  Do not
3157	 * touch quotas vnode since it is typically recursed with
3158	 * other vnode locks held.
3159	 */
3160	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)) ||
3161	    (vp->v_vflag & VV_SYSTEM) != 0)
3162		return (0);
3163	ump = VFSTOUFS(vp->v_mount);
3164	ACQUIRE_LOCK(ump);
3165	if (journal_space(ump, 0)) {
3166		FREE_LOCK(ump);
3167		return (0);
3168	}
3169	stat_journal_low++;
3170	FREE_LOCK(ump);
3171	if (waitok == MNT_NOWAIT)
3172		return (ENOSPC);
3173	/*
3174	 * Attempt to sync this vnode once to flush any journal
3175	 * work attached to it.
3176	 */
3177	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
3178		ffs_syncvnode(vp, waitok, 0);
3179	ACQUIRE_LOCK(ump);
3180	process_removes(vp);
3181	process_truncates(vp);
3182	journal_check_space(ump);
3183	FREE_LOCK(ump);
3184
3185	return (0);
3186}
3187
3188/*
3189 * Try hard to sync all data and metadata for the vnode, and workitems
3190 * flushing which might conflict with the vnode lock.  This is a
3191 * helper for softdep_prerename().
3192 */
3193static int
3194softdep_prerename_vnode(struct ufsmount *ump, struct vnode *vp)
3195{
3196	int error;
3197
3198	ASSERT_VOP_ELOCKED(vp, "prehandle");
3199	if (vp->v_data == NULL)
3200		return (0);
3201	error = VOP_FSYNC(vp, MNT_WAIT, curthread);
3202	if (error != 0)
3203		return (error);
3204	ACQUIRE_LOCK(ump);
3205	process_removes(vp);
3206	process_truncates(vp);
3207	FREE_LOCK(ump);
3208	return (0);
3209}
3210
3211/*
3212 * Must be called from VOP_RENAME() after all vnodes are locked.
3213 * Ensures that there is enough journal space for rename.  It is
3214 * sufficiently different from softdep_prelink() by having to handle
3215 * four vnodes.
3216 */
3217int
3218softdep_prerename(struct vnode *fdvp,
3219	struct vnode *fvp,
3220	struct vnode *tdvp,
3221	struct vnode *tvp)
3222{
3223	struct ufsmount *ump;
3224	int error;
3225
3226	ump = VFSTOUFS(fdvp->v_mount);
3227
3228	if (journal_space(ump, 0))
3229		return (0);
3230
3231	VOP_UNLOCK(tdvp);
3232	VOP_UNLOCK(fvp);
3233	if (tvp != NULL && tvp != tdvp)
3234		VOP_UNLOCK(tvp);
3235
3236	error = softdep_prerename_vnode(ump, fdvp);
3237	VOP_UNLOCK(fdvp);
3238	if (error != 0)
3239		return (error);
3240
3241	VOP_LOCK(fvp, LK_EXCLUSIVE | LK_RETRY);
3242	error = softdep_prerename_vnode(ump, fvp);
3243	VOP_UNLOCK(fvp);
3244	if (error != 0)
3245		return (error);
3246
3247	if (tdvp != fdvp) {
3248		VOP_LOCK(tdvp, LK_EXCLUSIVE | LK_RETRY);
3249		error = softdep_prerename_vnode(ump, tdvp);
3250		VOP_UNLOCK(tdvp);
3251		if (error != 0)
3252			return (error);
3253	}
3254
3255	if (tvp != fvp && tvp != NULL) {
3256		VOP_LOCK(tvp, LK_EXCLUSIVE | LK_RETRY);
3257		error = softdep_prerename_vnode(ump, tvp);
3258		VOP_UNLOCK(tvp);
3259		if (error != 0)
3260			return (error);
3261	}
3262
3263	ACQUIRE_LOCK(ump);
3264	softdep_speedup(ump);
3265	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3266	journal_check_space(ump);
3267	FREE_LOCK(ump);
3268	return (ERELOOKUP);
3269}
3270
3271/*
3272 * Before adjusting a link count on a vnode verify that we have sufficient
3273 * journal space.  If not, process operations that depend on the currently
3274 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
3275 * and softdep flush threads can not acquire these locks to reclaim space.
3276 *
3277 * Returns 0 if all owned locks are still valid and were not dropped
3278 * in the process, in other case it returns either an error from sync,
3279 * or ERELOOKUP if any of the locks were re-acquired.  In the later
3280 * case, the state of the vnodes cannot be relied upon and our VFS
3281 * syscall must be restarted at top level from the lookup.
3282 */
3283int
3284softdep_prelink(struct vnode *dvp,
3285	struct vnode *vp,
3286	struct componentname *cnp)
3287{
3288	struct ufsmount *ump;
3289	struct nameidata *ndp;
3290
3291	ASSERT_VOP_ELOCKED(dvp, "prelink dvp");
3292	if (vp != NULL)
3293		ASSERT_VOP_ELOCKED(vp, "prelink vp");
3294	ump = VFSTOUFS(dvp->v_mount);
3295
3296	/*
3297	 * Nothing to do if we have sufficient journal space.  We skip
3298	 * flushing when vp is a snapshot to avoid deadlock where
3299	 * another thread is trying to update the inodeblock for dvp
3300	 * and is waiting on snaplk that vp holds.
3301	 */
3302	if (journal_space(ump, 0) || (vp != NULL && IS_SNAPSHOT(VTOI(vp))))
3303		return (0);
3304
3305	/*
3306	 * Check if the journal space consumption can in theory be
3307	 * accounted on dvp and vp.  If the vnodes metadata was not
3308	 * changed comparing with the previous round-trip into
3309	 * softdep_prelink(), as indicated by the seqc generation
3310	 * recorded in the nameidata, then there is no point in
3311	 * starting the sync.
3312	 */
3313	ndp = __containerof(cnp, struct nameidata, ni_cnd);
3314	if (!seqc_in_modify(ndp->ni_dvp_seqc) &&
3315	    vn_seqc_consistent(dvp, ndp->ni_dvp_seqc) &&
3316	    (vp == NULL || (!seqc_in_modify(ndp->ni_vp_seqc) &&
3317	    vn_seqc_consistent(vp, ndp->ni_vp_seqc))))
3318		return (0);
3319
3320	stat_journal_low++;
3321	if (vp != NULL) {
3322		VOP_UNLOCK(dvp);
3323		ffs_syncvnode(vp, MNT_NOWAIT, 0);
3324		vn_lock_pair(dvp, false, LK_EXCLUSIVE, vp, true, LK_EXCLUSIVE);
3325		if (dvp->v_data == NULL)
3326			goto out;
3327	}
3328	if (vp != NULL)
3329		VOP_UNLOCK(vp);
3330	ffs_syncvnode(dvp, MNT_WAIT, 0);
3331	/* Process vp before dvp as it may create .. removes. */
3332	if (vp != NULL) {
3333		VOP_UNLOCK(dvp);
3334		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3335		if (vp->v_data == NULL) {
3336			vn_lock_pair(dvp, false, LK_EXCLUSIVE, vp, true,
3337			    LK_EXCLUSIVE);
3338			goto out;
3339		}
3340		ACQUIRE_LOCK(ump);
3341		process_removes(vp);
3342		process_truncates(vp);
3343		FREE_LOCK(ump);
3344		VOP_UNLOCK(vp);
3345		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
3346		if (dvp->v_data == NULL) {
3347			vn_lock_pair(dvp, true, LK_EXCLUSIVE, vp, false,
3348			    LK_EXCLUSIVE);
3349			goto out;
3350		}
3351	}
3352
3353	ACQUIRE_LOCK(ump);
3354	process_removes(dvp);
3355	process_truncates(dvp);
3356	VOP_UNLOCK(dvp);
3357	softdep_speedup(ump);
3358
3359	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
3360	journal_check_space(ump);
3361	FREE_LOCK(ump);
3362
3363	vn_lock_pair(dvp, false, LK_EXCLUSIVE, vp, false, LK_EXCLUSIVE);
3364out:
3365	ndp->ni_dvp_seqc = vn_seqc_read_any(dvp);
3366	if (vp != NULL)
3367		ndp->ni_vp_seqc = vn_seqc_read_any(vp);
3368	return (ERELOOKUP);
3369}
3370
3371static void
3372jseg_write(struct ufsmount *ump,
3373	struct jseg *jseg,
3374	uint8_t *data)
3375{
3376	struct jsegrec *rec;
3377
3378	rec = (struct jsegrec *)data;
3379	rec->jsr_seq = jseg->js_seq;
3380	rec->jsr_oldest = jseg->js_oldseq;
3381	rec->jsr_cnt = jseg->js_cnt;
3382	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
3383	rec->jsr_crc = 0;
3384	rec->jsr_time = ump->um_fs->fs_mtime;
3385}
3386
3387static inline void
3388inoref_write(struct inoref *inoref,
3389	struct jseg *jseg,
3390	struct jrefrec *rec)
3391{
3392
3393	inoref->if_jsegdep->jd_seg = jseg;
3394	rec->jr_ino = inoref->if_ino;
3395	rec->jr_parent = inoref->if_parent;
3396	rec->jr_nlink = inoref->if_nlink;
3397	rec->jr_mode = inoref->if_mode;
3398	rec->jr_diroff = inoref->if_diroff;
3399}
3400
3401static void
3402jaddref_write(struct jaddref *jaddref,
3403	struct jseg *jseg,
3404	uint8_t *data)
3405{
3406	struct jrefrec *rec;
3407
3408	rec = (struct jrefrec *)data;
3409	rec->jr_op = JOP_ADDREF;
3410	inoref_write(&jaddref->ja_ref, jseg, rec);
3411}
3412
3413static void
3414jremref_write(struct jremref *jremref,
3415	struct jseg *jseg,
3416	uint8_t *data)
3417{
3418	struct jrefrec *rec;
3419
3420	rec = (struct jrefrec *)data;
3421	rec->jr_op = JOP_REMREF;
3422	inoref_write(&jremref->jr_ref, jseg, rec);
3423}
3424
3425static void
3426jmvref_write(struct jmvref *jmvref,
3427	struct jseg *jseg,
3428	uint8_t *data)
3429{
3430	struct jmvrec *rec;
3431
3432	rec = (struct jmvrec *)data;
3433	rec->jm_op = JOP_MVREF;
3434	rec->jm_ino = jmvref->jm_ino;
3435	rec->jm_parent = jmvref->jm_parent;
3436	rec->jm_oldoff = jmvref->jm_oldoff;
3437	rec->jm_newoff = jmvref->jm_newoff;
3438}
3439
3440static void
3441jnewblk_write(struct jnewblk *jnewblk,
3442	struct jseg *jseg,
3443	uint8_t *data)
3444{
3445	struct jblkrec *rec;
3446
3447	jnewblk->jn_jsegdep->jd_seg = jseg;
3448	rec = (struct jblkrec *)data;
3449	rec->jb_op = JOP_NEWBLK;
3450	rec->jb_ino = jnewblk->jn_ino;
3451	rec->jb_blkno = jnewblk->jn_blkno;
3452	rec->jb_lbn = jnewblk->jn_lbn;
3453	rec->jb_frags = jnewblk->jn_frags;
3454	rec->jb_oldfrags = jnewblk->jn_oldfrags;
3455}
3456
3457static void
3458jfreeblk_write(struct jfreeblk *jfreeblk,
3459	struct jseg *jseg,
3460	uint8_t *data)
3461{
3462	struct jblkrec *rec;
3463
3464	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3465	rec = (struct jblkrec *)data;
3466	rec->jb_op = JOP_FREEBLK;
3467	rec->jb_ino = jfreeblk->jf_ino;
3468	rec->jb_blkno = jfreeblk->jf_blkno;
3469	rec->jb_lbn = jfreeblk->jf_lbn;
3470	rec->jb_frags = jfreeblk->jf_frags;
3471	rec->jb_oldfrags = 0;
3472}
3473
3474static void
3475jfreefrag_write(struct jfreefrag *jfreefrag,
3476	struct jseg *jseg,
3477	uint8_t *data)
3478{
3479	struct jblkrec *rec;
3480
3481	jfreefrag->fr_jsegdep->jd_seg = jseg;
3482	rec = (struct jblkrec *)data;
3483	rec->jb_op = JOP_FREEBLK;
3484	rec->jb_ino = jfreefrag->fr_ino;
3485	rec->jb_blkno = jfreefrag->fr_blkno;
3486	rec->jb_lbn = jfreefrag->fr_lbn;
3487	rec->jb_frags = jfreefrag->fr_frags;
3488	rec->jb_oldfrags = 0;
3489}
3490
3491static void
3492jtrunc_write(struct jtrunc *jtrunc,
3493	struct jseg *jseg,
3494	uint8_t *data)
3495{
3496	struct jtrncrec *rec;
3497
3498	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3499	rec = (struct jtrncrec *)data;
3500	rec->jt_op = JOP_TRUNC;
3501	rec->jt_ino = jtrunc->jt_ino;
3502	rec->jt_size = jtrunc->jt_size;
3503	rec->jt_extsize = jtrunc->jt_extsize;
3504}
3505
3506static void
3507jfsync_write(struct jfsync *jfsync,
3508	struct jseg *jseg,
3509	uint8_t *data)
3510{
3511	struct jtrncrec *rec;
3512
3513	rec = (struct jtrncrec *)data;
3514	rec->jt_op = JOP_SYNC;
3515	rec->jt_ino = jfsync->jfs_ino;
3516	rec->jt_size = jfsync->jfs_size;
3517	rec->jt_extsize = jfsync->jfs_extsize;
3518}
3519
3520static void
3521softdep_flushjournal(struct mount *mp)
3522{
3523	struct jblocks *jblocks;
3524	struct ufsmount *ump;
3525
3526	if (MOUNTEDSUJ(mp) == 0)
3527		return;
3528	ump = VFSTOUFS(mp);
3529	jblocks = ump->softdep_jblocks;
3530	ACQUIRE_LOCK(ump);
3531	while (ump->softdep_on_journal) {
3532		jblocks->jb_needseg = 1;
3533		softdep_process_journal(mp, NULL, MNT_WAIT);
3534	}
3535	FREE_LOCK(ump);
3536}
3537
3538static void softdep_synchronize_completed(struct bio *);
3539static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3540
3541static void
3542softdep_synchronize_completed(struct bio *bp)
3543{
3544	struct jseg *oldest;
3545	struct jseg *jseg;
3546	struct ufsmount *ump;
3547
3548	/*
3549	 * caller1 marks the last segment written before we issued the
3550	 * synchronize cache.
3551	 */
3552	jseg = bp->bio_caller1;
3553	if (jseg == NULL) {
3554		g_destroy_bio(bp);
3555		return;
3556	}
3557	ump = VFSTOUFS(jseg->js_list.wk_mp);
3558	ACQUIRE_LOCK(ump);
3559	oldest = NULL;
3560	/*
3561	 * Mark all the journal entries waiting on the synchronize cache
3562	 * as completed so they may continue on.
3563	 */
3564	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3565		jseg->js_state |= COMPLETE;
3566		oldest = jseg;
3567		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3568	}
3569	/*
3570	 * Restart deferred journal entry processing from the oldest
3571	 * completed jseg.
3572	 */
3573	if (oldest)
3574		complete_jsegs(oldest);
3575
3576	FREE_LOCK(ump);
3577	g_destroy_bio(bp);
3578}
3579
3580/*
3581 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3582 * barriers.  The journal must be written prior to any blocks that depend
3583 * on it and the journal can not be released until the blocks have be
3584 * written.  This code handles both barriers simultaneously.
3585 */
3586static void
3587softdep_synchronize(struct bio *bp,
3588	struct ufsmount *ump,
3589	void *caller1)
3590{
3591
3592	bp->bio_cmd = BIO_FLUSH;
3593	bp->bio_flags |= BIO_ORDERED;
3594	bp->bio_data = NULL;
3595	bp->bio_offset = ump->um_cp->provider->mediasize;
3596	bp->bio_length = 0;
3597	bp->bio_done = softdep_synchronize_completed;
3598	bp->bio_caller1 = caller1;
3599	g_io_request(bp, ump->um_cp);
3600}
3601
3602/*
3603 * Flush some journal records to disk.
3604 */
3605static void
3606softdep_process_journal(struct mount *mp,
3607	struct worklist *needwk,
3608	int flags)
3609{
3610	struct jblocks *jblocks;
3611	struct ufsmount *ump;
3612	struct worklist *wk;
3613	struct jseg *jseg;
3614	struct buf *bp;
3615	struct bio *bio;
3616	uint8_t *data;
3617	struct fs *fs;
3618	int shouldflush;
3619	int segwritten;
3620	int jrecmin;	/* Minimum records per block. */
3621	int jrecmax;	/* Maximum records per block. */
3622	int size;
3623	int cnt;
3624	int off;
3625	int devbsize;
3626
3627	ump = VFSTOUFS(mp);
3628	if (ump->um_softdep == NULL || ump->um_softdep->sd_jblocks == NULL)
3629		return;
3630	shouldflush = softdep_flushcache;
3631	bio = NULL;
3632	jseg = NULL;
3633	LOCK_OWNED(ump);
3634	fs = ump->um_fs;
3635	jblocks = ump->softdep_jblocks;
3636	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3637	/*
3638	 * We write anywhere between a disk block and fs block.  The upper
3639	 * bound is picked to prevent buffer cache fragmentation and limit
3640	 * processing time per I/O.
3641	 */
3642	jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3643	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
3644	segwritten = 0;
3645	for (;;) {
3646		cnt = ump->softdep_on_journal;
3647		/*
3648		 * Criteria for writing a segment:
3649		 * 1) We have a full block.
3650		 * 2) We're called from jwait() and haven't found the
3651		 *    journal item yet.
3652		 * 3) Always write if needseg is set.
3653		 * 4) If we are called from process_worklist and have
3654		 *    not yet written anything we write a partial block
3655		 *    to enforce a 1 second maximum latency on journal
3656		 *    entries.
3657		 */
3658		if (cnt < (jrecmax - 1) && needwk == NULL &&
3659		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3660			break;
3661		cnt++;
3662		/*
3663		 * Verify some free journal space.  softdep_prealloc() should
3664		 * guarantee that we don't run out so this is indicative of
3665		 * a problem with the flow control.  Try to recover
3666		 * gracefully in any event.
3667		 */
3668		while (jblocks->jb_free == 0) {
3669			if (flags != MNT_WAIT)
3670				break;
3671			printf("softdep: Out of journal space!\n");
3672			softdep_speedup(ump);
3673			msleep(jblocks, LOCK_PTR(ump), PRIBIO, "jblocks", hz);
3674		}
3675		FREE_LOCK(ump);
3676		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3677		workitem_alloc(&jseg->js_list, D_JSEG, mp);
3678		LIST_INIT(&jseg->js_entries);
3679		LIST_INIT(&jseg->js_indirs);
3680		jseg->js_state = ATTACHED;
3681		if (shouldflush == 0)
3682			jseg->js_state |= COMPLETE;
3683		else if (bio == NULL)
3684			bio = g_alloc_bio();
3685		jseg->js_jblocks = jblocks;
3686		bp = geteblk(fs->fs_bsize, 0);
3687		ACQUIRE_LOCK(ump);
3688		/*
3689		 * If there was a race while we were allocating the block
3690		 * and jseg the entry we care about was likely written.
3691		 * We bail out in both the WAIT and NOWAIT case and assume
3692		 * the caller will loop if the entry it cares about is
3693		 * not written.
3694		 */
3695		cnt = ump->softdep_on_journal;
3696		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3697			bp->b_flags |= B_INVAL | B_NOCACHE;
3698			WORKITEM_FREE(jseg, D_JSEG);
3699			FREE_LOCK(ump);
3700			brelse(bp);
3701			ACQUIRE_LOCK(ump);
3702			break;
3703		}
3704		/*
3705		 * Calculate the disk block size required for the available
3706		 * records rounded to the min size.
3707		 */
3708		if (cnt == 0)
3709			size = devbsize;
3710		else if (cnt < jrecmax)
3711			size = howmany(cnt, jrecmin) * devbsize;
3712		else
3713			size = fs->fs_bsize;
3714		/*
3715		 * Allocate a disk block for this journal data and account
3716		 * for truncation of the requested size if enough contiguous
3717		 * space was not available.
3718		 */
3719		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3720		bp->b_lblkno = bp->b_blkno;
3721		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3722		bp->b_bcount = size;
3723		bp->b_flags &= ~B_INVAL;
3724		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3725		/*
3726		 * Initialize our jseg with cnt records.  Assign the next
3727		 * sequence number to it and link it in-order.
3728		 */
3729		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3730		jseg->js_buf = bp;
3731		jseg->js_cnt = cnt;
3732		jseg->js_refs = cnt + 1;	/* Self ref. */
3733		jseg->js_size = size;
3734		jseg->js_seq = jblocks->jb_nextseq++;
3735		if (jblocks->jb_oldestseg == NULL)
3736			jblocks->jb_oldestseg = jseg;
3737		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3738		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3739		if (jblocks->jb_writeseg == NULL)
3740			jblocks->jb_writeseg = jseg;
3741		/*
3742		 * Start filling in records from the pending list.
3743		 */
3744		data = bp->b_data;
3745		off = 0;
3746
3747		/*
3748		 * Always put a header on the first block.
3749		 * XXX As with below, there might not be a chance to get
3750		 * into the loop.  Ensure that something valid is written.
3751		 */
3752		jseg_write(ump, jseg, data);
3753		off += JREC_SIZE;
3754		data = bp->b_data + off;
3755
3756		/*
3757		 * XXX Something is wrong here.  There's no work to do,
3758		 * but we need to perform and I/O and allow it to complete
3759		 * anyways.
3760		 */
3761		if (LIST_EMPTY(&ump->softdep_journal_pending))
3762			stat_emptyjblocks++;
3763
3764		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3765		    != NULL) {
3766			if (cnt == 0)
3767				break;
3768			/* Place a segment header on every device block. */
3769			if ((off % devbsize) == 0) {
3770				jseg_write(ump, jseg, data);
3771				off += JREC_SIZE;
3772				data = bp->b_data + off;
3773			}
3774			if (wk == needwk)
3775				needwk = NULL;
3776			remove_from_journal(wk);
3777			wk->wk_state |= INPROGRESS;
3778			WORKLIST_INSERT(&jseg->js_entries, wk);
3779			switch (wk->wk_type) {
3780			case D_JADDREF:
3781				jaddref_write(WK_JADDREF(wk), jseg, data);
3782				break;
3783			case D_JREMREF:
3784				jremref_write(WK_JREMREF(wk), jseg, data);
3785				break;
3786			case D_JMVREF:
3787				jmvref_write(WK_JMVREF(wk), jseg, data);
3788				break;
3789			case D_JNEWBLK:
3790				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3791				break;
3792			case D_JFREEBLK:
3793				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3794				break;
3795			case D_JFREEFRAG:
3796				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3797				break;
3798			case D_JTRUNC:
3799				jtrunc_write(WK_JTRUNC(wk), jseg, data);
3800				break;
3801			case D_JFSYNC:
3802				jfsync_write(WK_JFSYNC(wk), jseg, data);
3803				break;
3804			default:
3805				panic("process_journal: Unknown type %s",
3806				    TYPENAME(wk->wk_type));
3807				/* NOTREACHED */
3808			}
3809			off += JREC_SIZE;
3810			data = bp->b_data + off;
3811			cnt--;
3812		}
3813
3814		/* Clear any remaining space so we don't leak kernel data */
3815		if (size > off)
3816			bzero(data, size - off);
3817
3818		/*
3819		 * Write this one buffer and continue.
3820		 */
3821		segwritten = 1;
3822		jblocks->jb_needseg = 0;
3823		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3824		FREE_LOCK(ump);
3825		bp->b_xflags |= BX_CVTENXIO;
3826		pbgetvp(ump->um_devvp, bp);
3827		/*
3828		 * We only do the blocking wait once we find the journal
3829		 * entry we're looking for.
3830		 */
3831		if (needwk == NULL && flags == MNT_WAIT)
3832			bwrite(bp);
3833		else
3834			bawrite(bp);
3835		ACQUIRE_LOCK(ump);
3836	}
3837	/*
3838	 * If we wrote a segment issue a synchronize cache so the journal
3839	 * is reflected on disk before the data is written.  Since reclaiming
3840	 * journal space also requires writing a journal record this
3841	 * process also enforces a barrier before reclamation.
3842	 */
3843	if (segwritten && shouldflush) {
3844		softdep_synchronize(bio, ump,
3845		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
3846	} else if (bio)
3847		g_destroy_bio(bio);
3848	/*
3849	 * If we've suspended the filesystem because we ran out of journal
3850	 * space either try to sync it here to make some progress or
3851	 * unsuspend it if we already have.
3852	 */
3853	if (flags == 0 && jblocks->jb_suspended) {
3854		if (journal_unsuspend(ump))
3855			return;
3856		FREE_LOCK(ump);
3857		VFS_SYNC(mp, MNT_NOWAIT);
3858		ffs_sbupdate(ump, MNT_WAIT, 0);
3859		ACQUIRE_LOCK(ump);
3860	}
3861}
3862
3863/*
3864 * Complete a jseg, allowing all dependencies awaiting journal writes
3865 * to proceed.  Each journal dependency also attaches a jsegdep to dependent
3866 * structures so that the journal segment can be freed to reclaim space.
3867 */
3868static void
3869complete_jseg(struct jseg *jseg)
3870{
3871	struct worklist *wk;
3872	struct jmvref *jmvref;
3873#ifdef INVARIANTS
3874	int i = 0;
3875#endif
3876
3877	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3878		WORKLIST_REMOVE(wk);
3879		wk->wk_state &= ~INPROGRESS;
3880		wk->wk_state |= COMPLETE;
3881		KASSERT(i++ < jseg->js_cnt,
3882		    ("handle_written_jseg: overflow %d >= %d",
3883		    i - 1, jseg->js_cnt));
3884		switch (wk->wk_type) {
3885		case D_JADDREF:
3886			handle_written_jaddref(WK_JADDREF(wk));
3887			break;
3888		case D_JREMREF:
3889			handle_written_jremref(WK_JREMREF(wk));
3890			break;
3891		case D_JMVREF:
3892			rele_jseg(jseg);	/* No jsegdep. */
3893			jmvref = WK_JMVREF(wk);
3894			LIST_REMOVE(jmvref, jm_deps);
3895			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3896				free_pagedep(jmvref->jm_pagedep);
3897			WORKITEM_FREE(jmvref, D_JMVREF);
3898			break;
3899		case D_JNEWBLK:
3900			handle_written_jnewblk(WK_JNEWBLK(wk));
3901			break;
3902		case D_JFREEBLK:
3903			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3904			break;
3905		case D_JTRUNC:
3906			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3907			break;
3908		case D_JFSYNC:
3909			rele_jseg(jseg);	/* No jsegdep. */
3910			WORKITEM_FREE(wk, D_JFSYNC);
3911			break;
3912		case D_JFREEFRAG:
3913			handle_written_jfreefrag(WK_JFREEFRAG(wk));
3914			break;
3915		default:
3916			panic("handle_written_jseg: Unknown type %s",
3917			    TYPENAME(wk->wk_type));
3918			/* NOTREACHED */
3919		}
3920	}
3921	/* Release the self reference so the structure may be freed. */
3922	rele_jseg(jseg);
3923}
3924
3925/*
3926 * Determine which jsegs are ready for completion processing.  Waits for
3927 * synchronize cache to complete as well as forcing in-order completion
3928 * of journal entries.
3929 */
3930static void
3931complete_jsegs(struct jseg *jseg)
3932{
3933	struct jblocks *jblocks;
3934	struct jseg *jsegn;
3935
3936	jblocks = jseg->js_jblocks;
3937	/*
3938	 * Don't allow out of order completions.  If this isn't the first
3939	 * block wait for it to write before we're done.
3940	 */
3941	if (jseg != jblocks->jb_writeseg)
3942		return;
3943	/* Iterate through available jsegs processing their entries. */
3944	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3945		jblocks->jb_oldestwrseq = jseg->js_oldseq;
3946		jsegn = TAILQ_NEXT(jseg, js_next);
3947		complete_jseg(jseg);
3948		jseg = jsegn;
3949	}
3950	jblocks->jb_writeseg = jseg;
3951	/*
3952	 * Attempt to free jsegs now that oldestwrseq may have advanced.
3953	 */
3954	free_jsegs(jblocks);
3955}
3956
3957/*
3958 * Mark a jseg as DEPCOMPLETE and throw away the buffer.  Attempt to handle
3959 * the final completions.
3960 */
3961static void
3962handle_written_jseg(struct jseg *jseg, struct buf *bp)
3963{
3964
3965	if (jseg->js_refs == 0)
3966		panic("handle_written_jseg: No self-reference on %p", jseg);
3967	jseg->js_state |= DEPCOMPLETE;
3968	/*
3969	 * We'll never need this buffer again, set flags so it will be
3970	 * discarded.
3971	 */
3972	bp->b_flags |= B_INVAL | B_NOCACHE;
3973	pbrelvp(bp);
3974	complete_jsegs(jseg);
3975}
3976
3977static inline struct jsegdep *
3978inoref_jseg(struct inoref *inoref)
3979{
3980	struct jsegdep *jsegdep;
3981
3982	jsegdep = inoref->if_jsegdep;
3983	inoref->if_jsegdep = NULL;
3984
3985	return (jsegdep);
3986}
3987
3988/*
3989 * Called once a jremref has made it to stable store.  The jremref is marked
3990 * complete and we attempt to free it.  Any pagedeps writes sleeping waiting
3991 * for the jremref to complete will be awoken by free_jremref.
3992 */
3993static void
3994handle_written_jremref(struct jremref *jremref)
3995{
3996	struct inodedep *inodedep;
3997	struct jsegdep *jsegdep;
3998	struct dirrem *dirrem;
3999
4000	/* Grab the jsegdep. */
4001	jsegdep = inoref_jseg(&jremref->jr_ref);
4002	/*
4003	 * Remove us from the inoref list.
4004	 */
4005	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
4006	    0, &inodedep) == 0)
4007		panic("handle_written_jremref: Lost inodedep");
4008	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
4009	/*
4010	 * Complete the dirrem.
4011	 */
4012	dirrem = jremref->jr_dirrem;
4013	jremref->jr_dirrem = NULL;
4014	LIST_REMOVE(jremref, jr_deps);
4015	jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
4016	jwork_insert(&dirrem->dm_jwork, jsegdep);
4017	if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
4018	    (dirrem->dm_state & COMPLETE) != 0)
4019		add_to_worklist(&dirrem->dm_list, 0);
4020	free_jremref(jremref);
4021}
4022
4023/*
4024 * Called once a jaddref has made it to stable store.  The dependency is
4025 * marked complete and any dependent structures are added to the inode
4026 * bufwait list to be completed as soon as it is written.  If a bitmap write
4027 * depends on this entry we move the inode into the inodedephd of the
4028 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
4029 */
4030static void
4031handle_written_jaddref(struct jaddref *jaddref)
4032{
4033	struct jsegdep *jsegdep;
4034	struct inodedep *inodedep;
4035	struct diradd *diradd;
4036	struct mkdir *mkdir;
4037
4038	/* Grab the jsegdep. */
4039	jsegdep = inoref_jseg(&jaddref->ja_ref);
4040	mkdir = NULL;
4041	diradd = NULL;
4042	if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4043	    0, &inodedep) == 0)
4044		panic("handle_written_jaddref: Lost inodedep.");
4045	if (jaddref->ja_diradd == NULL)
4046		panic("handle_written_jaddref: No dependency");
4047	if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
4048		diradd = jaddref->ja_diradd;
4049		WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
4050	} else if (jaddref->ja_state & MKDIR_PARENT) {
4051		mkdir = jaddref->ja_mkdir;
4052		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
4053	} else if (jaddref->ja_state & MKDIR_BODY)
4054		mkdir = jaddref->ja_mkdir;
4055	else
4056		panic("handle_written_jaddref: Unknown dependency %p",
4057		    jaddref->ja_diradd);
4058	jaddref->ja_diradd = NULL;	/* also clears ja_mkdir */
4059	/*
4060	 * Remove us from the inode list.
4061	 */
4062	TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
4063	/*
4064	 * The mkdir may be waiting on the jaddref to clear before freeing.
4065	 */
4066	if (mkdir) {
4067		KASSERT(mkdir->md_list.wk_type == D_MKDIR,
4068		    ("handle_written_jaddref: Incorrect type for mkdir %s",
4069		    TYPENAME(mkdir->md_list.wk_type)));
4070		mkdir->md_jaddref = NULL;
4071		diradd = mkdir->md_diradd;
4072		mkdir->md_state |= DEPCOMPLETE;
4073		complete_mkdir(mkdir);
4074	}
4075	jwork_insert(&diradd->da_jwork, jsegdep);
4076	if (jaddref->ja_state & NEWBLOCK) {
4077		inodedep->id_state |= ONDEPLIST;
4078		LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
4079		    inodedep, id_deps);
4080	}
4081	free_jaddref(jaddref);
4082}
4083
4084/*
4085 * Called once a jnewblk journal is written.  The allocdirect or allocindir
4086 * is placed in the bmsafemap to await notification of a written bitmap.  If
4087 * the operation was canceled we add the segdep to the appropriate
4088 * dependency to free the journal space once the canceling operation
4089 * completes.
4090 */
4091static void
4092handle_written_jnewblk(struct jnewblk *jnewblk)
4093{
4094	struct bmsafemap *bmsafemap;
4095	struct freefrag *freefrag;
4096	struct freework *freework;
4097	struct jsegdep *jsegdep;
4098	struct newblk *newblk;
4099
4100	/* Grab the jsegdep. */
4101	jsegdep = jnewblk->jn_jsegdep;
4102	jnewblk->jn_jsegdep = NULL;
4103	if (jnewblk->jn_dep == NULL)
4104		panic("handle_written_jnewblk: No dependency for the segdep.");
4105	switch (jnewblk->jn_dep->wk_type) {
4106	case D_NEWBLK:
4107	case D_ALLOCDIRECT:
4108	case D_ALLOCINDIR:
4109		/*
4110		 * Add the written block to the bmsafemap so it can
4111		 * be notified when the bitmap is on disk.
4112		 */
4113		newblk = WK_NEWBLK(jnewblk->jn_dep);
4114		newblk->nb_jnewblk = NULL;
4115		if ((newblk->nb_state & GOINGAWAY) == 0) {
4116			bmsafemap = newblk->nb_bmsafemap;
4117			newblk->nb_state |= ONDEPLIST;
4118			LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
4119			    nb_deps);
4120		}
4121		jwork_insert(&newblk->nb_jwork, jsegdep);
4122		break;
4123	case D_FREEFRAG:
4124		/*
4125		 * A newblock being removed by a freefrag when replaced by
4126		 * frag extension.
4127		 */
4128		freefrag = WK_FREEFRAG(jnewblk->jn_dep);
4129		freefrag->ff_jdep = NULL;
4130		jwork_insert(&freefrag->ff_jwork, jsegdep);
4131		break;
4132	case D_FREEWORK:
4133		/*
4134		 * A direct block was removed by truncate.
4135		 */
4136		freework = WK_FREEWORK(jnewblk->jn_dep);
4137		freework->fw_jnewblk = NULL;
4138		jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
4139		break;
4140	default:
4141		panic("handle_written_jnewblk: Unknown type %d.",
4142		    jnewblk->jn_dep->wk_type);
4143	}
4144	jnewblk->jn_dep = NULL;
4145	free_jnewblk(jnewblk);
4146}
4147
4148/*
4149 * Cancel a jfreefrag that won't be needed, probably due to colliding with
4150 * an in-flight allocation that has not yet been committed.  Divorce us
4151 * from the freefrag and mark it DEPCOMPLETE so that it may be added
4152 * to the worklist.
4153 */
4154static void
4155cancel_jfreefrag(struct jfreefrag *jfreefrag)
4156{
4157	struct freefrag *freefrag;
4158
4159	if (jfreefrag->fr_jsegdep) {
4160		free_jsegdep(jfreefrag->fr_jsegdep);
4161		jfreefrag->fr_jsegdep = NULL;
4162	}
4163	freefrag = jfreefrag->fr_freefrag;
4164	jfreefrag->fr_freefrag = NULL;
4165	free_jfreefrag(jfreefrag);
4166	freefrag->ff_state |= DEPCOMPLETE;
4167	CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
4168}
4169
4170/*
4171 * Free a jfreefrag when the parent freefrag is rendered obsolete.
4172 */
4173static void
4174free_jfreefrag(struct jfreefrag *jfreefrag)
4175{
4176
4177	if (jfreefrag->fr_state & INPROGRESS)
4178		WORKLIST_REMOVE(&jfreefrag->fr_list);
4179	else if (jfreefrag->fr_state & ONWORKLIST)
4180		remove_from_journal(&jfreefrag->fr_list);
4181	if (jfreefrag->fr_freefrag != NULL)
4182		panic("free_jfreefrag:  Still attached to a freefrag.");
4183	WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
4184}
4185
4186/*
4187 * Called when the journal write for a jfreefrag completes.  The parent
4188 * freefrag is added to the worklist if this completes its dependencies.
4189 */
4190static void
4191handle_written_jfreefrag(struct jfreefrag *jfreefrag)
4192{
4193	struct jsegdep *jsegdep;
4194	struct freefrag *freefrag;
4195
4196	/* Grab the jsegdep. */
4197	jsegdep = jfreefrag->fr_jsegdep;
4198	jfreefrag->fr_jsegdep = NULL;
4199	freefrag = jfreefrag->fr_freefrag;
4200	if (freefrag == NULL)
4201		panic("handle_written_jfreefrag: No freefrag.");
4202	freefrag->ff_state |= DEPCOMPLETE;
4203	freefrag->ff_jdep = NULL;
4204	jwork_insert(&freefrag->ff_jwork, jsegdep);
4205	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
4206		add_to_worklist(&freefrag->ff_list, 0);
4207	jfreefrag->fr_freefrag = NULL;
4208	free_jfreefrag(jfreefrag);
4209}
4210
4211/*
4212 * Called when the journal write for a jfreeblk completes.  The jfreeblk
4213 * is removed from the freeblks list of pending journal writes and the
4214 * jsegdep is moved to the freeblks jwork to be completed when all blocks
4215 * have been reclaimed.
4216 */
4217static void
4218handle_written_jblkdep(struct jblkdep *jblkdep)
4219{
4220	struct freeblks *freeblks;
4221	struct jsegdep *jsegdep;
4222
4223	/* Grab the jsegdep. */
4224	jsegdep = jblkdep->jb_jsegdep;
4225	jblkdep->jb_jsegdep = NULL;
4226	freeblks = jblkdep->jb_freeblks;
4227	LIST_REMOVE(jblkdep, jb_deps);
4228	jwork_insert(&freeblks->fb_jwork, jsegdep);
4229	/*
4230	 * If the freeblks is all journaled, we can add it to the worklist.
4231	 */
4232	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
4233	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
4234		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
4235
4236	free_jblkdep(jblkdep);
4237}
4238
4239static struct jsegdep *
4240newjsegdep(struct worklist *wk)
4241{
4242	struct jsegdep *jsegdep;
4243
4244	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
4245	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
4246	jsegdep->jd_seg = NULL;
4247
4248	return (jsegdep);
4249}
4250
4251static struct jmvref *
4252newjmvref(struct inode *dp,
4253	ino_t ino,
4254	off_t oldoff,
4255	off_t newoff)
4256{
4257	struct jmvref *jmvref;
4258
4259	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
4260	workitem_alloc(&jmvref->jm_list, D_JMVREF, ITOVFS(dp));
4261	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
4262	jmvref->jm_parent = dp->i_number;
4263	jmvref->jm_ino = ino;
4264	jmvref->jm_oldoff = oldoff;
4265	jmvref->jm_newoff = newoff;
4266
4267	return (jmvref);
4268}
4269
4270/*
4271 * Allocate a new jremref that tracks the removal of ip from dp with the
4272 * directory entry offset of diroff.  Mark the entry as ATTACHED and
4273 * DEPCOMPLETE as we have all the information required for the journal write
4274 * and the directory has already been removed from the buffer.  The caller
4275 * is responsible for linking the jremref into the pagedep and adding it
4276 * to the journal to write.  The MKDIR_PARENT flag is set if we're doing
4277 * a DOTDOT addition so handle_workitem_remove() can properly assign
4278 * the jsegdep when we're done.
4279 */
4280static struct jremref *
4281newjremref(struct dirrem *dirrem,
4282	struct inode *dp,
4283	struct inode *ip,
4284	off_t diroff,
4285	nlink_t nlink)
4286{
4287	struct jremref *jremref;
4288
4289	jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
4290	workitem_alloc(&jremref->jr_list, D_JREMREF, ITOVFS(dp));
4291	jremref->jr_state = ATTACHED;
4292	newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
4293	   nlink, ip->i_mode);
4294	jremref->jr_dirrem = dirrem;
4295
4296	return (jremref);
4297}
4298
4299static inline void
4300newinoref(struct inoref *inoref,
4301	ino_t ino,
4302	ino_t parent,
4303	off_t diroff,
4304	nlink_t nlink,
4305	uint16_t mode)
4306{
4307
4308	inoref->if_jsegdep = newjsegdep(&inoref->if_list);
4309	inoref->if_diroff = diroff;
4310	inoref->if_ino = ino;
4311	inoref->if_parent = parent;
4312	inoref->if_nlink = nlink;
4313	inoref->if_mode = mode;
4314}
4315
4316/*
4317 * Allocate a new jaddref to track the addition of ino to dp at diroff.  The
4318 * directory offset may not be known until later.  The caller is responsible
4319 * adding the entry to the journal when this information is available.  nlink
4320 * should be the link count prior to the addition and mode is only required
4321 * to have the correct FMT.
4322 */
4323static struct jaddref *
4324newjaddref(struct inode *dp,
4325	ino_t ino,
4326	off_t diroff,
4327	int16_t nlink,
4328	uint16_t mode)
4329{
4330	struct jaddref *jaddref;
4331
4332	jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
4333	workitem_alloc(&jaddref->ja_list, D_JADDREF, ITOVFS(dp));
4334	jaddref->ja_state = ATTACHED;
4335	jaddref->ja_mkdir = NULL;
4336	newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
4337
4338	return (jaddref);
4339}
4340
4341/*
4342 * Create a new free dependency for a freework.  The caller is responsible
4343 * for adjusting the reference count when it has the lock held.  The freedep
4344 * will track an outstanding bitmap write that will ultimately clear the
4345 * freework to continue.
4346 */
4347static struct freedep *
4348newfreedep(struct freework *freework)
4349{
4350	struct freedep *freedep;
4351
4352	freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
4353	workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
4354	freedep->fd_freework = freework;
4355
4356	return (freedep);
4357}
4358
4359/*
4360 * Free a freedep structure once the buffer it is linked to is written.  If
4361 * this is the last reference to the freework schedule it for completion.
4362 */
4363static void
4364free_freedep(struct freedep *freedep)
4365{
4366	struct freework *freework;
4367
4368	freework = freedep->fd_freework;
4369	freework->fw_freeblks->fb_cgwait--;
4370	if (--freework->fw_ref == 0)
4371		freework_enqueue(freework);
4372	WORKITEM_FREE(freedep, D_FREEDEP);
4373}
4374
4375/*
4376 * Allocate a new freework structure that may be a level in an indirect
4377 * when parent is not NULL or a top level block when it is.  The top level
4378 * freework structures are allocated without the per-filesystem lock held
4379 * and before the freeblks is visible outside of softdep_setup_freeblocks().
4380 */
4381static struct freework *
4382newfreework(struct ufsmount *ump,
4383	struct freeblks *freeblks,
4384	struct freework *parent,
4385	ufs_lbn_t lbn,
4386	ufs2_daddr_t nb,
4387	int frags,
4388	int off,
4389	int journal)
4390{
4391	struct freework *freework;
4392
4393	freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
4394	workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
4395	freework->fw_state = ATTACHED;
4396	freework->fw_jnewblk = NULL;
4397	freework->fw_freeblks = freeblks;
4398	freework->fw_parent = parent;
4399	freework->fw_lbn = lbn;
4400	freework->fw_blkno = nb;
4401	freework->fw_frags = frags;
4402	freework->fw_indir = NULL;
4403	freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 ||
4404	    lbn >= -UFS_NXADDR) ? 0 : NINDIR(ump->um_fs) + 1;
4405	freework->fw_start = freework->fw_off = off;
4406	if (journal)
4407		newjfreeblk(freeblks, lbn, nb, frags);
4408	if (parent == NULL) {
4409		ACQUIRE_LOCK(ump);
4410		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
4411		freeblks->fb_ref++;
4412		FREE_LOCK(ump);
4413	}
4414
4415	return (freework);
4416}
4417
4418/*
4419 * Eliminate a jfreeblk for a block that does not need journaling.
4420 */
4421static void
4422cancel_jfreeblk(struct freeblks *freeblks, ufs2_daddr_t blkno)
4423{
4424	struct jfreeblk *jfreeblk;
4425	struct jblkdep *jblkdep;
4426
4427	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4428		if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4429			continue;
4430		jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4431		if (jfreeblk->jf_blkno == blkno)
4432			break;
4433	}
4434	if (jblkdep == NULL)
4435		return;
4436	CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4437	free_jsegdep(jblkdep->jb_jsegdep);
4438	LIST_REMOVE(jblkdep, jb_deps);
4439	WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4440}
4441
4442/*
4443 * Allocate a new jfreeblk to journal top level block pointer when truncating
4444 * a file.  The caller must add this to the worklist when the per-filesystem
4445 * lock is held.
4446 */
4447static struct jfreeblk *
4448newjfreeblk(struct freeblks *freeblks,
4449	ufs_lbn_t lbn,
4450	ufs2_daddr_t blkno,
4451	int frags)
4452{
4453	struct jfreeblk *jfreeblk;
4454
4455	jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4456	workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4457	    freeblks->fb_list.wk_mp);
4458	jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4459	jfreeblk->jf_dep.jb_freeblks = freeblks;
4460	jfreeblk->jf_ino = freeblks->fb_inum;
4461	jfreeblk->jf_lbn = lbn;
4462	jfreeblk->jf_blkno = blkno;
4463	jfreeblk->jf_frags = frags;
4464	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4465
4466	return (jfreeblk);
4467}
4468
4469/*
4470 * The journal is only prepared to handle full-size block numbers, so we
4471 * have to adjust the record to reflect the change to a full-size block.
4472 * For example, suppose we have a block made up of fragments 8-15 and
4473 * want to free its last two fragments. We are given a request that says:
4474 *     FREEBLK ino=5, blkno=14, lbn=0, frags=2, oldfrags=0
4475 * where frags are the number of fragments to free and oldfrags are the
4476 * number of fragments to keep. To block align it, we have to change it to
4477 * have a valid full-size blkno, so it becomes:
4478 *     FREEBLK ino=5, blkno=8, lbn=0, frags=2, oldfrags=6
4479 */
4480static void
4481adjust_newfreework(struct freeblks *freeblks, int frag_offset)
4482{
4483	struct jfreeblk *jfreeblk;
4484
4485	KASSERT((LIST_FIRST(&freeblks->fb_jblkdephd) != NULL &&
4486	    LIST_FIRST(&freeblks->fb_jblkdephd)->jb_list.wk_type == D_JFREEBLK),
4487	    ("adjust_newfreework: Missing freeblks dependency"));
4488
4489	jfreeblk = WK_JFREEBLK(LIST_FIRST(&freeblks->fb_jblkdephd));
4490	jfreeblk->jf_blkno -= frag_offset;
4491	jfreeblk->jf_frags += frag_offset;
4492}
4493
4494/*
4495 * Allocate a new jtrunc to track a partial truncation.
4496 */
4497static struct jtrunc *
4498newjtrunc(struct freeblks *freeblks,
4499	off_t size,
4500	int extsize)
4501{
4502	struct jtrunc *jtrunc;
4503
4504	jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4505	workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4506	    freeblks->fb_list.wk_mp);
4507	jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4508	jtrunc->jt_dep.jb_freeblks = freeblks;
4509	jtrunc->jt_ino = freeblks->fb_inum;
4510	jtrunc->jt_size = size;
4511	jtrunc->jt_extsize = extsize;
4512	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4513
4514	return (jtrunc);
4515}
4516
4517/*
4518 * If we're canceling a new bitmap we have to search for another ref
4519 * to move into the bmsafemap dep.  This might be better expressed
4520 * with another structure.
4521 */
4522static void
4523move_newblock_dep(struct jaddref *jaddref, struct inodedep *inodedep)
4524{
4525	struct inoref *inoref;
4526	struct jaddref *jaddrefn;
4527
4528	jaddrefn = NULL;
4529	for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4530	    inoref = TAILQ_NEXT(inoref, if_deps)) {
4531		if ((jaddref->ja_state & NEWBLOCK) &&
4532		    inoref->if_list.wk_type == D_JADDREF) {
4533			jaddrefn = (struct jaddref *)inoref;
4534			break;
4535		}
4536	}
4537	if (jaddrefn == NULL)
4538		return;
4539	jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4540	jaddrefn->ja_state |= jaddref->ja_state &
4541	    (ATTACHED | UNDONE | NEWBLOCK);
4542	jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4543	jaddref->ja_state |= ATTACHED;
4544	LIST_REMOVE(jaddref, ja_bmdeps);
4545	LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4546	    ja_bmdeps);
4547}
4548
4549/*
4550 * Cancel a jaddref either before it has been written or while it is being
4551 * written.  This happens when a link is removed before the add reaches
4552 * the disk.  The jaddref dependency is kept linked into the bmsafemap
4553 * and inode to prevent the link count or bitmap from reaching the disk
4554 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4555 * required.
4556 *
4557 * Returns 1 if the canceled addref requires journaling of the remove and
4558 * 0 otherwise.
4559 */
4560static int
4561cancel_jaddref(struct jaddref *jaddref,
4562	struct inodedep *inodedep,
4563	struct workhead *wkhd)
4564{
4565	struct inoref *inoref;
4566	struct jsegdep *jsegdep;
4567	int needsj;
4568
4569	KASSERT((jaddref->ja_state & COMPLETE) == 0,
4570	    ("cancel_jaddref: Canceling complete jaddref"));
4571	if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4572		needsj = 1;
4573	else
4574		needsj = 0;
4575	if (inodedep == NULL)
4576		if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4577		    0, &inodedep) == 0)
4578			panic("cancel_jaddref: Lost inodedep");
4579	/*
4580	 * We must adjust the nlink of any reference operation that follows
4581	 * us so that it is consistent with the in-memory reference.  This
4582	 * ensures that inode nlink rollbacks always have the correct link.
4583	 */
4584	if (needsj == 0) {
4585		for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4586		    inoref = TAILQ_NEXT(inoref, if_deps)) {
4587			if (inoref->if_state & GOINGAWAY)
4588				break;
4589			inoref->if_nlink--;
4590		}
4591	}
4592	jsegdep = inoref_jseg(&jaddref->ja_ref);
4593	if (jaddref->ja_state & NEWBLOCK)
4594		move_newblock_dep(jaddref, inodedep);
4595	wake_worklist(&jaddref->ja_list);
4596	jaddref->ja_mkdir = NULL;
4597	if (jaddref->ja_state & INPROGRESS) {
4598		jaddref->ja_state &= ~INPROGRESS;
4599		WORKLIST_REMOVE(&jaddref->ja_list);
4600		jwork_insert(wkhd, jsegdep);
4601	} else {
4602		free_jsegdep(jsegdep);
4603		if (jaddref->ja_state & DEPCOMPLETE)
4604			remove_from_journal(&jaddref->ja_list);
4605	}
4606	jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4607	/*
4608	 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4609	 * can arrange for them to be freed with the bitmap.  Otherwise we
4610	 * no longer need this addref attached to the inoreflst and it
4611	 * will incorrectly adjust nlink if we leave it.
4612	 */
4613	if ((jaddref->ja_state & NEWBLOCK) == 0) {
4614		TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4615		    if_deps);
4616		jaddref->ja_state |= COMPLETE;
4617		free_jaddref(jaddref);
4618		return (needsj);
4619	}
4620	/*
4621	 * Leave the head of the list for jsegdeps for fast merging.
4622	 */
4623	if (LIST_FIRST(wkhd) != NULL) {
4624		jaddref->ja_state |= ONWORKLIST;
4625		LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4626	} else
4627		WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4628
4629	return (needsj);
4630}
4631
4632/*
4633 * Attempt to free a jaddref structure when some work completes.  This
4634 * should only succeed once the entry is written and all dependencies have
4635 * been notified.
4636 */
4637static void
4638free_jaddref(struct jaddref *jaddref)
4639{
4640
4641	if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4642		return;
4643	if (jaddref->ja_ref.if_jsegdep)
4644		panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4645		    jaddref, jaddref->ja_state);
4646	if (jaddref->ja_state & NEWBLOCK)
4647		LIST_REMOVE(jaddref, ja_bmdeps);
4648	if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4649		panic("free_jaddref: Bad state %p(0x%X)",
4650		    jaddref, jaddref->ja_state);
4651	if (jaddref->ja_mkdir != NULL)
4652		panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4653	WORKITEM_FREE(jaddref, D_JADDREF);
4654}
4655
4656/*
4657 * Free a jremref structure once it has been written or discarded.
4658 */
4659static void
4660free_jremref(struct jremref *jremref)
4661{
4662
4663	if (jremref->jr_ref.if_jsegdep)
4664		free_jsegdep(jremref->jr_ref.if_jsegdep);
4665	if (jremref->jr_state & INPROGRESS)
4666		panic("free_jremref: IO still pending");
4667	WORKITEM_FREE(jremref, D_JREMREF);
4668}
4669
4670/*
4671 * Free a jnewblk structure.
4672 */
4673static void
4674free_jnewblk(struct jnewblk *jnewblk)
4675{
4676
4677	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4678		return;
4679	LIST_REMOVE(jnewblk, jn_deps);
4680	if (jnewblk->jn_dep != NULL)
4681		panic("free_jnewblk: Dependency still attached.");
4682	WORKITEM_FREE(jnewblk, D_JNEWBLK);
4683}
4684
4685/*
4686 * Cancel a jnewblk which has been been made redundant by frag extension.
4687 */
4688static void
4689cancel_jnewblk(struct jnewblk *jnewblk, struct workhead *wkhd)
4690{
4691	struct jsegdep *jsegdep;
4692
4693	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4694	jsegdep = jnewblk->jn_jsegdep;
4695	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4696		panic("cancel_jnewblk: Invalid state");
4697	jnewblk->jn_jsegdep  = NULL;
4698	jnewblk->jn_dep = NULL;
4699	jnewblk->jn_state |= GOINGAWAY;
4700	if (jnewblk->jn_state & INPROGRESS) {
4701		jnewblk->jn_state &= ~INPROGRESS;
4702		WORKLIST_REMOVE(&jnewblk->jn_list);
4703		jwork_insert(wkhd, jsegdep);
4704	} else {
4705		free_jsegdep(jsegdep);
4706		remove_from_journal(&jnewblk->jn_list);
4707	}
4708	wake_worklist(&jnewblk->jn_list);
4709	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4710}
4711
4712static void
4713free_jblkdep(struct jblkdep *jblkdep)
4714{
4715
4716	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4717		WORKITEM_FREE(jblkdep, D_JFREEBLK);
4718	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4719		WORKITEM_FREE(jblkdep, D_JTRUNC);
4720	else
4721		panic("free_jblkdep: Unexpected type %s",
4722		    TYPENAME(jblkdep->jb_list.wk_type));
4723}
4724
4725/*
4726 * Free a single jseg once it is no longer referenced in memory or on
4727 * disk.  Reclaim journal blocks and dependencies waiting for the segment
4728 * to disappear.
4729 */
4730static void
4731free_jseg(struct jseg *jseg, struct jblocks *jblocks)
4732{
4733	struct freework *freework;
4734
4735	/*
4736	 * Free freework structures that were lingering to indicate freed
4737	 * indirect blocks that forced journal write ordering on reallocate.
4738	 */
4739	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4740		indirblk_remove(freework);
4741	if (jblocks->jb_oldestseg == jseg)
4742		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4743	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4744	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4745	KASSERT(LIST_EMPTY(&jseg->js_entries),
4746	    ("free_jseg: Freed jseg has valid entries."));
4747	WORKITEM_FREE(jseg, D_JSEG);
4748}
4749
4750/*
4751 * Free all jsegs that meet the criteria for being reclaimed and update
4752 * oldestseg.
4753 */
4754static void
4755free_jsegs(struct jblocks *jblocks)
4756{
4757	struct jseg *jseg;
4758
4759	/*
4760	 * Free only those jsegs which have none allocated before them to
4761	 * preserve the journal space ordering.
4762	 */
4763	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4764		/*
4765		 * Only reclaim space when nothing depends on this journal
4766		 * set and another set has written that it is no longer
4767		 * valid.
4768		 */
4769		if (jseg->js_refs != 0) {
4770			jblocks->jb_oldestseg = jseg;
4771			return;
4772		}
4773		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4774			break;
4775		if (jseg->js_seq > jblocks->jb_oldestwrseq)
4776			break;
4777		/*
4778		 * We can free jsegs that didn't write entries when
4779		 * oldestwrseq == js_seq.
4780		 */
4781		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4782		    jseg->js_cnt != 0)
4783			break;
4784		free_jseg(jseg, jblocks);
4785	}
4786	/*
4787	 * If we exited the loop above we still must discover the
4788	 * oldest valid segment.
4789	 */
4790	if (jseg)
4791		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4792		     jseg = TAILQ_NEXT(jseg, js_next))
4793			if (jseg->js_refs != 0)
4794				break;
4795	jblocks->jb_oldestseg = jseg;
4796	/*
4797	 * The journal has no valid records but some jsegs may still be
4798	 * waiting on oldestwrseq to advance.  We force a small record
4799	 * out to permit these lingering records to be reclaimed.
4800	 */
4801	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4802		jblocks->jb_needseg = 1;
4803}
4804
4805/*
4806 * Release one reference to a jseg and free it if the count reaches 0.  This
4807 * should eventually reclaim journal space as well.
4808 */
4809static void
4810rele_jseg(struct jseg *jseg)
4811{
4812
4813	KASSERT(jseg->js_refs > 0,
4814	    ("free_jseg: Invalid refcnt %d", jseg->js_refs));
4815	if (--jseg->js_refs != 0)
4816		return;
4817	free_jsegs(jseg->js_jblocks);
4818}
4819
4820/*
4821 * Release a jsegdep and decrement the jseg count.
4822 */
4823static void
4824free_jsegdep(struct jsegdep *jsegdep)
4825{
4826
4827	if (jsegdep->jd_seg)
4828		rele_jseg(jsegdep->jd_seg);
4829	WORKITEM_FREE(jsegdep, D_JSEGDEP);
4830}
4831
4832/*
4833 * Wait for a journal item to make it to disk.  Initiate journal processing
4834 * if required.
4835 */
4836static int
4837jwait(struct worklist *wk, int waitfor)
4838{
4839
4840	LOCK_OWNED(VFSTOUFS(wk->wk_mp));
4841	/*
4842	 * Blocking journal waits cause slow synchronous behavior.  Record
4843	 * stats on the frequency of these blocking operations.
4844	 */
4845	if (waitfor == MNT_WAIT) {
4846		stat_journal_wait++;
4847		switch (wk->wk_type) {
4848		case D_JREMREF:
4849		case D_JMVREF:
4850			stat_jwait_filepage++;
4851			break;
4852		case D_JTRUNC:
4853		case D_JFREEBLK:
4854			stat_jwait_freeblks++;
4855			break;
4856		case D_JNEWBLK:
4857			stat_jwait_newblk++;
4858			break;
4859		case D_JADDREF:
4860			stat_jwait_inode++;
4861			break;
4862		default:
4863			break;
4864		}
4865	}
4866	/*
4867	 * If IO has not started we process the journal.  We can't mark the
4868	 * worklist item as IOWAITING because we drop the lock while
4869	 * processing the journal and the worklist entry may be freed after
4870	 * this point.  The caller may call back in and re-issue the request.
4871	 */
4872	if ((wk->wk_state & INPROGRESS) == 0) {
4873		softdep_process_journal(wk->wk_mp, wk, waitfor);
4874		if (waitfor != MNT_WAIT)
4875			return (EBUSY);
4876		return (0);
4877	}
4878	if (waitfor != MNT_WAIT)
4879		return (EBUSY);
4880	wait_worklist(wk, "jwait");
4881	return (0);
4882}
4883
4884/*
4885 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4886 * appropriate.  This is a convenience function to reduce duplicate code
4887 * for the setup and revert functions below.
4888 */
4889static struct inodedep *
4890inodedep_lookup_ip(struct inode *ip)
4891{
4892	struct inodedep *inodedep;
4893
4894	KASSERT(ip->i_nlink >= ip->i_effnlink,
4895	    ("inodedep_lookup_ip: bad delta"));
4896	(void) inodedep_lookup(ITOVFS(ip), ip->i_number, DEPALLOC,
4897	    &inodedep);
4898	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4899	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4900
4901	return (inodedep);
4902}
4903
4904/*
4905 * Called prior to creating a new inode and linking it to a directory.  The
4906 * jaddref structure must already be allocated by softdep_setup_inomapdep
4907 * and it is discovered here so we can initialize the mode and update
4908 * nlinkdelta.
4909 */
4910void
4911softdep_setup_create(struct inode *dp, struct inode *ip)
4912{
4913	struct inodedep *inodedep;
4914	struct jaddref *jaddref __diagused;
4915	struct vnode *dvp;
4916
4917	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
4918	    ("softdep_setup_create called on non-softdep filesystem"));
4919	KASSERT(ip->i_nlink == 1,
4920	    ("softdep_setup_create: Invalid link count."));
4921	dvp = ITOV(dp);
4922	ACQUIRE_LOCK(ITOUMP(dp));
4923	inodedep = inodedep_lookup_ip(ip);
4924	if (DOINGSUJ(dvp)) {
4925		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4926		    inoreflst);
4927		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4928		    ("softdep_setup_create: No addref structure present."));
4929	}
4930	FREE_LOCK(ITOUMP(dp));
4931}
4932
4933/*
4934 * Create a jaddref structure to track the addition of a DOTDOT link when
4935 * we are reparenting an inode as part of a rename.  This jaddref will be
4936 * found by softdep_setup_directory_change.  Adjusts nlinkdelta for
4937 * non-journaling softdep.
4938 */
4939void
4940softdep_setup_dotdot_link(struct inode *dp, struct inode *ip)
4941{
4942	struct inodedep *inodedep;
4943	struct jaddref *jaddref;
4944	struct vnode *dvp;
4945
4946	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
4947	    ("softdep_setup_dotdot_link called on non-softdep filesystem"));
4948	dvp = ITOV(dp);
4949	jaddref = NULL;
4950	/*
4951	 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4952	 * is used as a normal link would be.
4953	 */
4954	if (DOINGSUJ(dvp))
4955		jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4956		    dp->i_effnlink - 1, dp->i_mode);
4957	ACQUIRE_LOCK(ITOUMP(dp));
4958	inodedep = inodedep_lookup_ip(dp);
4959	if (jaddref)
4960		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4961		    if_deps);
4962	FREE_LOCK(ITOUMP(dp));
4963}
4964
4965/*
4966 * Create a jaddref structure to track a new link to an inode.  The directory
4967 * offset is not known until softdep_setup_directory_add or
4968 * softdep_setup_directory_change.  Adjusts nlinkdelta for non-journaling
4969 * softdep.
4970 */
4971void
4972softdep_setup_link(struct inode *dp, struct inode *ip)
4973{
4974	struct inodedep *inodedep;
4975	struct jaddref *jaddref;
4976	struct vnode *dvp;
4977
4978	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
4979	    ("softdep_setup_link called on non-softdep filesystem"));
4980	dvp = ITOV(dp);
4981	jaddref = NULL;
4982	if (DOINGSUJ(dvp))
4983		jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4984		    ip->i_mode);
4985	ACQUIRE_LOCK(ITOUMP(dp));
4986	inodedep = inodedep_lookup_ip(ip);
4987	if (jaddref)
4988		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4989		    if_deps);
4990	FREE_LOCK(ITOUMP(dp));
4991}
4992
4993/*
4994 * Called to create the jaddref structures to track . and .. references as
4995 * well as lookup and further initialize the incomplete jaddref created
4996 * by softdep_setup_inomapdep when the inode was allocated.  Adjusts
4997 * nlinkdelta for non-journaling softdep.
4998 */
4999void
5000softdep_setup_mkdir(struct inode *dp, struct inode *ip)
5001{
5002	struct inodedep *inodedep;
5003	struct jaddref *dotdotaddref;
5004	struct jaddref *dotaddref;
5005	struct jaddref *jaddref;
5006	struct vnode *dvp;
5007
5008	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5009	    ("softdep_setup_mkdir called on non-softdep filesystem"));
5010	dvp = ITOV(dp);
5011	dotaddref = dotdotaddref = NULL;
5012	if (DOINGSUJ(dvp)) {
5013		dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
5014		    ip->i_mode);
5015		dotaddref->ja_state |= MKDIR_BODY;
5016		dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
5017		    dp->i_effnlink - 1, dp->i_mode);
5018		dotdotaddref->ja_state |= MKDIR_PARENT;
5019	}
5020	ACQUIRE_LOCK(ITOUMP(dp));
5021	inodedep = inodedep_lookup_ip(ip);
5022	if (DOINGSUJ(dvp)) {
5023		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5024		    inoreflst);
5025		KASSERT(jaddref != NULL,
5026		    ("softdep_setup_mkdir: No addref structure present."));
5027		KASSERT(jaddref->ja_parent == dp->i_number,
5028		    ("softdep_setup_mkdir: bad parent %ju",
5029		    (uintmax_t)jaddref->ja_parent));
5030		TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
5031		    if_deps);
5032	}
5033	inodedep = inodedep_lookup_ip(dp);
5034	if (DOINGSUJ(dvp))
5035		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
5036		    &dotdotaddref->ja_ref, if_deps);
5037	FREE_LOCK(ITOUMP(dp));
5038}
5039
5040/*
5041 * Called to track nlinkdelta of the inode and parent directories prior to
5042 * unlinking a directory.
5043 */
5044void
5045softdep_setup_rmdir(struct inode *dp, struct inode *ip)
5046{
5047
5048	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5049	    ("softdep_setup_rmdir called on non-softdep filesystem"));
5050	ACQUIRE_LOCK(ITOUMP(dp));
5051	(void) inodedep_lookup_ip(ip);
5052	(void) inodedep_lookup_ip(dp);
5053	FREE_LOCK(ITOUMP(dp));
5054}
5055
5056/*
5057 * Called to track nlinkdelta of the inode and parent directories prior to
5058 * unlink.
5059 */
5060void
5061softdep_setup_unlink(struct inode *dp, struct inode *ip)
5062{
5063
5064	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5065	    ("softdep_setup_unlink called on non-softdep filesystem"));
5066	ACQUIRE_LOCK(ITOUMP(dp));
5067	(void) inodedep_lookup_ip(ip);
5068	(void) inodedep_lookup_ip(dp);
5069	FREE_LOCK(ITOUMP(dp));
5070}
5071
5072/*
5073 * Called to release the journal structures created by a failed non-directory
5074 * creation.  Adjusts nlinkdelta for non-journaling softdep.
5075 */
5076void
5077softdep_revert_create(struct inode *dp, struct inode *ip)
5078{
5079	struct inodedep *inodedep;
5080	struct jaddref *jaddref;
5081	struct vnode *dvp;
5082
5083	KASSERT(MOUNTEDSOFTDEP(ITOVFS((dp))) != 0,
5084	    ("softdep_revert_create called on non-softdep filesystem"));
5085	dvp = ITOV(dp);
5086	ACQUIRE_LOCK(ITOUMP(dp));
5087	inodedep = inodedep_lookup_ip(ip);
5088	if (DOINGSUJ(dvp)) {
5089		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5090		    inoreflst);
5091		KASSERT(jaddref->ja_parent == dp->i_number,
5092		    ("softdep_revert_create: addref parent mismatch"));
5093		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5094	}
5095	FREE_LOCK(ITOUMP(dp));
5096}
5097
5098/*
5099 * Called to release the journal structures created by a failed link
5100 * addition.  Adjusts nlinkdelta for non-journaling softdep.
5101 */
5102void
5103softdep_revert_link(struct inode *dp, struct inode *ip)
5104{
5105	struct inodedep *inodedep;
5106	struct jaddref *jaddref;
5107	struct vnode *dvp;
5108
5109	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5110	    ("softdep_revert_link called on non-softdep filesystem"));
5111	dvp = ITOV(dp);
5112	ACQUIRE_LOCK(ITOUMP(dp));
5113	inodedep = inodedep_lookup_ip(ip);
5114	if (DOINGSUJ(dvp)) {
5115		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5116		    inoreflst);
5117		KASSERT(jaddref->ja_parent == dp->i_number,
5118		    ("softdep_revert_link: addref parent mismatch"));
5119		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5120	}
5121	FREE_LOCK(ITOUMP(dp));
5122}
5123
5124/*
5125 * Called to release the journal structures created by a failed mkdir
5126 * attempt.  Adjusts nlinkdelta for non-journaling softdep.
5127 */
5128void
5129softdep_revert_mkdir(struct inode *dp, struct inode *ip)
5130{
5131	struct inodedep *inodedep;
5132	struct jaddref *jaddref;
5133	struct jaddref *dotaddref;
5134	struct vnode *dvp;
5135
5136	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5137	    ("softdep_revert_mkdir called on non-softdep filesystem"));
5138	dvp = ITOV(dp);
5139
5140	ACQUIRE_LOCK(ITOUMP(dp));
5141	inodedep = inodedep_lookup_ip(dp);
5142	if (DOINGSUJ(dvp)) {
5143		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5144		    inoreflst);
5145		KASSERT(jaddref->ja_parent == ip->i_number,
5146		    ("softdep_revert_mkdir: dotdot addref parent mismatch"));
5147		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5148	}
5149	inodedep = inodedep_lookup_ip(ip);
5150	if (DOINGSUJ(dvp)) {
5151		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
5152		    inoreflst);
5153		KASSERT(jaddref->ja_parent == dp->i_number,
5154		    ("softdep_revert_mkdir: addref parent mismatch"));
5155		dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
5156		    inoreflst, if_deps);
5157		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
5158		KASSERT(dotaddref->ja_parent == ip->i_number,
5159		    ("softdep_revert_mkdir: dot addref parent mismatch"));
5160		cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
5161	}
5162	FREE_LOCK(ITOUMP(dp));
5163}
5164
5165/*
5166 * Called to correct nlinkdelta after a failed rmdir.
5167 */
5168void
5169softdep_revert_rmdir(struct inode *dp, struct inode *ip)
5170{
5171
5172	KASSERT(MOUNTEDSOFTDEP(ITOVFS(dp)) != 0,
5173	    ("softdep_revert_rmdir called on non-softdep filesystem"));
5174	ACQUIRE_LOCK(ITOUMP(dp));
5175	(void) inodedep_lookup_ip(ip);
5176	(void) inodedep_lookup_ip(dp);
5177	FREE_LOCK(ITOUMP(dp));
5178}
5179
5180/*
5181 * Protecting the freemaps (or bitmaps).
5182 *
5183 * To eliminate the need to execute fsck before mounting a filesystem
5184 * after a power failure, one must (conservatively) guarantee that the
5185 * on-disk copy of the bitmaps never indicate that a live inode or block is
5186 * free.  So, when a block or inode is allocated, the bitmap should be
5187 * updated (on disk) before any new pointers.  When a block or inode is
5188 * freed, the bitmap should not be updated until all pointers have been
5189 * reset.  The latter dependency is handled by the delayed de-allocation
5190 * approach described below for block and inode de-allocation.  The former
5191 * dependency is handled by calling the following procedure when a block or
5192 * inode is allocated. When an inode is allocated an "inodedep" is created
5193 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
5194 * Each "inodedep" is also inserted into the hash indexing structure so
5195 * that any additional link additions can be made dependent on the inode
5196 * allocation.
5197 *
5198 * The ufs filesystem maintains a number of free block counts (e.g., per
5199 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
5200 * in addition to the bitmaps.  These counts are used to improve efficiency
5201 * during allocation and therefore must be consistent with the bitmaps.
5202 * There is no convenient way to guarantee post-crash consistency of these
5203 * counts with simple update ordering, for two main reasons: (1) The counts
5204 * and bitmaps for a single cylinder group block are not in the same disk
5205 * sector.  If a disk write is interrupted (e.g., by power failure), one may
5206 * be written and the other not.  (2) Some of the counts are located in the
5207 * superblock rather than the cylinder group block. So, we focus our soft
5208 * updates implementation on protecting the bitmaps. When mounting a
5209 * filesystem, we recompute the auxiliary counts from the bitmaps.
5210 */
5211
5212/*
5213 * Called just after updating the cylinder group block to allocate an inode.
5214 */
5215void
5216softdep_setup_inomapdep(
5217	struct buf *bp,		/* buffer for cylgroup block with inode map */
5218	struct inode *ip,	/* inode related to allocation */
5219	ino_t newinum,		/* new inode number being allocated */
5220	int mode)
5221{
5222	struct inodedep *inodedep;
5223	struct bmsafemap *bmsafemap;
5224	struct jaddref *jaddref;
5225	struct mount *mp;
5226	struct fs *fs;
5227
5228	mp = ITOVFS(ip);
5229	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5230	    ("softdep_setup_inomapdep called on non-softdep filesystem"));
5231	fs = VFSTOUFS(mp)->um_fs;
5232	jaddref = NULL;
5233
5234	/*
5235	 * Allocate the journal reference add structure so that the bitmap
5236	 * can be dependent on it.
5237	 */
5238	if (MOUNTEDSUJ(mp)) {
5239		jaddref = newjaddref(ip, newinum, 0, 0, mode);
5240		jaddref->ja_state |= NEWBLOCK;
5241	}
5242
5243	/*
5244	 * Create a dependency for the newly allocated inode.
5245	 * Panic if it already exists as something is seriously wrong.
5246	 * Otherwise add it to the dependency list for the buffer holding
5247	 * the cylinder group map from which it was allocated.
5248	 *
5249	 * We have to preallocate a bmsafemap entry in case it is needed
5250	 * in bmsafemap_lookup since once we allocate the inodedep, we
5251	 * have to finish initializing it before we can FREE_LOCK().
5252	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
5253	 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
5254	 * creating the inodedep as it can be freed during the time
5255	 * that we FREE_LOCK() while allocating the inodedep. We must
5256	 * call workitem_alloc() before entering the locked section as
5257	 * it also acquires the lock and we must avoid trying doing so
5258	 * recursively.
5259	 */
5260	bmsafemap = malloc(sizeof(struct bmsafemap),
5261	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5262	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5263	ACQUIRE_LOCK(ITOUMP(ip));
5264	if ((inodedep_lookup(mp, newinum, DEPALLOC, &inodedep)))
5265		panic("softdep_setup_inomapdep: dependency %p for new"
5266		    "inode already exists", inodedep);
5267	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
5268	if (jaddref) {
5269		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
5270		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
5271		    if_deps);
5272	} else {
5273		inodedep->id_state |= ONDEPLIST;
5274		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
5275	}
5276	inodedep->id_bmsafemap = bmsafemap;
5277	inodedep->id_state &= ~DEPCOMPLETE;
5278	FREE_LOCK(ITOUMP(ip));
5279}
5280
5281/*
5282 * Called just after updating the cylinder group block to
5283 * allocate block or fragment.
5284 */
5285void
5286softdep_setup_blkmapdep(
5287	struct buf *bp,		/* buffer for cylgroup block with block map */
5288	struct mount *mp,	/* filesystem doing allocation */
5289	ufs2_daddr_t newblkno,	/* number of newly allocated block */
5290	int frags,		/* Number of fragments. */
5291	int oldfrags)		/* Previous number of fragments for extend. */
5292{
5293	struct newblk *newblk;
5294	struct bmsafemap *bmsafemap;
5295	struct jnewblk *jnewblk;
5296	struct ufsmount *ump;
5297	struct fs *fs;
5298
5299	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5300	    ("softdep_setup_blkmapdep called on non-softdep filesystem"));
5301	ump = VFSTOUFS(mp);
5302	fs = ump->um_fs;
5303	jnewblk = NULL;
5304	/*
5305	 * Create a dependency for the newly allocated block.
5306	 * Add it to the dependency list for the buffer holding
5307	 * the cylinder group map from which it was allocated.
5308	 */
5309	if (MOUNTEDSUJ(mp)) {
5310		jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
5311		workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
5312		jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
5313		jnewblk->jn_state = ATTACHED;
5314		jnewblk->jn_blkno = newblkno;
5315		jnewblk->jn_frags = frags;
5316		jnewblk->jn_oldfrags = oldfrags;
5317#ifdef INVARIANTS
5318		{
5319			struct cg *cgp;
5320			uint8_t *blksfree;
5321			long bno;
5322			int i;
5323
5324			cgp = (struct cg *)bp->b_data;
5325			blksfree = cg_blksfree(cgp);
5326			bno = dtogd(fs, jnewblk->jn_blkno);
5327			for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
5328			    i++) {
5329				if (isset(blksfree, bno + i))
5330					panic("softdep_setup_blkmapdep: "
5331					    "free fragment %d from %d-%d "
5332					    "state 0x%X dep %p", i,
5333					    jnewblk->jn_oldfrags,
5334					    jnewblk->jn_frags,
5335					    jnewblk->jn_state,
5336					    jnewblk->jn_dep);
5337			}
5338		}
5339#endif
5340	}
5341
5342	CTR3(KTR_SUJ,
5343	    "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
5344	    newblkno, frags, oldfrags);
5345	ACQUIRE_LOCK(ump);
5346	if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
5347		panic("softdep_setup_blkmapdep: found block");
5348	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp,
5349	    dtog(fs, newblkno), NULL);
5350	if (jnewblk) {
5351		jnewblk->jn_dep = (struct worklist *)newblk;
5352		LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
5353	} else {
5354		newblk->nb_state |= ONDEPLIST;
5355		LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
5356	}
5357	newblk->nb_bmsafemap = bmsafemap;
5358	newblk->nb_jnewblk = jnewblk;
5359	FREE_LOCK(ump);
5360}
5361
5362#define	BMSAFEMAP_HASH(ump, cg) \
5363      (&(ump)->bmsafemap_hashtbl[(cg) & (ump)->bmsafemap_hash_size])
5364
5365static int
5366bmsafemap_find(
5367	struct bmsafemap_hashhead *bmsafemaphd,
5368	int cg,
5369	struct bmsafemap **bmsafemapp)
5370{
5371	struct bmsafemap *bmsafemap;
5372
5373	LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
5374		if (bmsafemap->sm_cg == cg)
5375			break;
5376	if (bmsafemap) {
5377		*bmsafemapp = bmsafemap;
5378		return (1);
5379	}
5380	*bmsafemapp = NULL;
5381
5382	return (0);
5383}
5384
5385/*
5386 * Find the bmsafemap associated with a cylinder group buffer.
5387 * If none exists, create one. The buffer must be locked when
5388 * this routine is called and this routine must be called with
5389 * the softdep lock held. To avoid giving up the lock while
5390 * allocating a new bmsafemap, a preallocated bmsafemap may be
5391 * provided. If it is provided but not needed, it is freed.
5392 */
5393static struct bmsafemap *
5394bmsafemap_lookup(struct mount *mp,
5395	struct buf *bp,
5396	int cg,
5397	struct bmsafemap *newbmsafemap)
5398{
5399	struct bmsafemap_hashhead *bmsafemaphd;
5400	struct bmsafemap *bmsafemap, *collision;
5401	struct worklist *wk;
5402	struct ufsmount *ump;
5403
5404	ump = VFSTOUFS(mp);
5405	LOCK_OWNED(ump);
5406	KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5407	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5408		if (wk->wk_type == D_BMSAFEMAP) {
5409			if (newbmsafemap)
5410				WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5411			return (WK_BMSAFEMAP(wk));
5412		}
5413	}
5414	bmsafemaphd = BMSAFEMAP_HASH(ump, cg);
5415	if (bmsafemap_find(bmsafemaphd, cg, &bmsafemap) == 1) {
5416		if (newbmsafemap)
5417			WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5418		return (bmsafemap);
5419	}
5420	if (newbmsafemap) {
5421		bmsafemap = newbmsafemap;
5422	} else {
5423		FREE_LOCK(ump);
5424		bmsafemap = malloc(sizeof(struct bmsafemap),
5425			M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5426		workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5427		ACQUIRE_LOCK(ump);
5428	}
5429	bmsafemap->sm_buf = bp;
5430	LIST_INIT(&bmsafemap->sm_inodedephd);
5431	LIST_INIT(&bmsafemap->sm_inodedepwr);
5432	LIST_INIT(&bmsafemap->sm_newblkhd);
5433	LIST_INIT(&bmsafemap->sm_newblkwr);
5434	LIST_INIT(&bmsafemap->sm_jaddrefhd);
5435	LIST_INIT(&bmsafemap->sm_jnewblkhd);
5436	LIST_INIT(&bmsafemap->sm_freehd);
5437	LIST_INIT(&bmsafemap->sm_freewr);
5438	if (bmsafemap_find(bmsafemaphd, cg, &collision) == 1) {
5439		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5440		return (collision);
5441	}
5442	bmsafemap->sm_cg = cg;
5443	LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5444	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
5445	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5446	return (bmsafemap);
5447}
5448
5449/*
5450 * Direct block allocation dependencies.
5451 *
5452 * When a new block is allocated, the corresponding disk locations must be
5453 * initialized (with zeros or new data) before the on-disk inode points to
5454 * them.  Also, the freemap from which the block was allocated must be
5455 * updated (on disk) before the inode's pointer. These two dependencies are
5456 * independent of each other and are needed for all file blocks and indirect
5457 * blocks that are pointed to directly by the inode.  Just before the
5458 * "in-core" version of the inode is updated with a newly allocated block
5459 * number, a procedure (below) is called to setup allocation dependency
5460 * structures.  These structures are removed when the corresponding
5461 * dependencies are satisfied or when the block allocation becomes obsolete
5462 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5463 * fragment that gets upgraded).  All of these cases are handled in
5464 * procedures described later.
5465 *
5466 * When a file extension causes a fragment to be upgraded, either to a larger
5467 * fragment or to a full block, the on-disk location may change (if the
5468 * previous fragment could not simply be extended). In this case, the old
5469 * fragment must be de-allocated, but not until after the inode's pointer has
5470 * been updated. In most cases, this is handled by later procedures, which
5471 * will construct a "freefrag" structure to be added to the workitem queue
5472 * when the inode update is complete (or obsolete).  The main exception to
5473 * this is when an allocation occurs while a pending allocation dependency
5474 * (for the same block pointer) remains.  This case is handled in the main
5475 * allocation dependency setup procedure by immediately freeing the
5476 * unreferenced fragments.
5477 */
5478void
5479softdep_setup_allocdirect(
5480	struct inode *ip,	/* inode to which block is being added */
5481	ufs_lbn_t off,		/* block pointer within inode */
5482	ufs2_daddr_t newblkno,	/* disk block number being added */
5483	ufs2_daddr_t oldblkno,	/* previous block number, 0 unless frag */
5484	long newsize,		/* size of new block */
5485	long oldsize,		/* size of new block */
5486	struct buf *bp)		/* bp for allocated block */
5487{
5488	struct allocdirect *adp, *oldadp;
5489	struct allocdirectlst *adphead;
5490	struct freefrag *freefrag;
5491	struct inodedep *inodedep;
5492	struct pagedep *pagedep;
5493	struct jnewblk *jnewblk;
5494	struct newblk *newblk;
5495	struct mount *mp;
5496	ufs_lbn_t lbn;
5497
5498	lbn = bp->b_lblkno;
5499	mp = ITOVFS(ip);
5500	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5501	    ("softdep_setup_allocdirect called on non-softdep filesystem"));
5502	if (oldblkno && oldblkno != newblkno)
5503		/*
5504		 * The usual case is that a smaller fragment that
5505		 * was just allocated has been replaced with a bigger
5506		 * fragment or a full-size block. If it is marked as
5507		 * B_DELWRI, the current contents have not been written
5508		 * to disk. It is possible that the block was written
5509		 * earlier, but very uncommon. If the block has never
5510		 * been written, there is no need to send a BIO_DELETE
5511		 * for it when it is freed. The gain from avoiding the
5512		 * TRIMs for the common case of unwritten blocks far
5513		 * exceeds the cost of the write amplification for the
5514		 * uncommon case of failing to send a TRIM for a block
5515		 * that had been written.
5516		 */
5517		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn,
5518		    (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY);
5519	else
5520		freefrag = NULL;
5521
5522	CTR6(KTR_SUJ,
5523	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5524	    "off %jd newsize %ld oldsize %d",
5525	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5526	ACQUIRE_LOCK(ITOUMP(ip));
5527	if (off >= UFS_NDADDR) {
5528		if (lbn > 0)
5529			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5530			    lbn, off);
5531		/* allocating an indirect block */
5532		if (oldblkno != 0)
5533			panic("softdep_setup_allocdirect: non-zero indir");
5534	} else {
5535		if (off != lbn)
5536			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5537			    lbn, off);
5538		/*
5539		 * Allocating a direct block.
5540		 *
5541		 * If we are allocating a directory block, then we must
5542		 * allocate an associated pagedep to track additions and
5543		 * deletions.
5544		 */
5545		if ((ip->i_mode & IFMT) == IFDIR)
5546			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5547			    &pagedep);
5548	}
5549	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5550		panic("softdep_setup_allocdirect: lost block");
5551	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5552	    ("softdep_setup_allocdirect: newblk already initialized"));
5553	/*
5554	 * Convert the newblk to an allocdirect.
5555	 */
5556	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5557	adp = (struct allocdirect *)newblk;
5558	newblk->nb_freefrag = freefrag;
5559	adp->ad_offset = off;
5560	adp->ad_oldblkno = oldblkno;
5561	adp->ad_newsize = newsize;
5562	adp->ad_oldsize = oldsize;
5563
5564	/*
5565	 * Finish initializing the journal.
5566	 */
5567	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5568		jnewblk->jn_ino = ip->i_number;
5569		jnewblk->jn_lbn = lbn;
5570		add_to_journal(&jnewblk->jn_list);
5571	}
5572	if (freefrag && freefrag->ff_jdep != NULL &&
5573	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5574		add_to_journal(freefrag->ff_jdep);
5575	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5576	adp->ad_inodedep = inodedep;
5577
5578	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5579	/*
5580	 * The list of allocdirects must be kept in sorted and ascending
5581	 * order so that the rollback routines can quickly determine the
5582	 * first uncommitted block (the size of the file stored on disk
5583	 * ends at the end of the lowest committed fragment, or if there
5584	 * are no fragments, at the end of the highest committed block).
5585	 * Since files generally grow, the typical case is that the new
5586	 * block is to be added at the end of the list. We speed this
5587	 * special case by checking against the last allocdirect in the
5588	 * list before laboriously traversing the list looking for the
5589	 * insertion point.
5590	 */
5591	adphead = &inodedep->id_newinoupdt;
5592	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5593	if (oldadp == NULL || oldadp->ad_offset <= off) {
5594		/* insert at end of list */
5595		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5596		if (oldadp != NULL && oldadp->ad_offset == off)
5597			allocdirect_merge(adphead, adp, oldadp);
5598		FREE_LOCK(ITOUMP(ip));
5599		return;
5600	}
5601	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5602		if (oldadp->ad_offset >= off)
5603			break;
5604	}
5605	if (oldadp == NULL)
5606		panic("softdep_setup_allocdirect: lost entry");
5607	/* insert in middle of list */
5608	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5609	if (oldadp->ad_offset == off)
5610		allocdirect_merge(adphead, adp, oldadp);
5611
5612	FREE_LOCK(ITOUMP(ip));
5613}
5614
5615/*
5616 * Merge a newer and older journal record to be stored either in a
5617 * newblock or freefrag.  This handles aggregating journal records for
5618 * fragment allocation into a second record as well as replacing a
5619 * journal free with an aborted journal allocation.  A segment for the
5620 * oldest record will be placed on wkhd if it has been written.  If not
5621 * the segment for the newer record will suffice.
5622 */
5623static struct worklist *
5624jnewblk_merge(struct worklist *new,
5625	struct worklist *old,
5626	struct workhead *wkhd)
5627{
5628	struct jnewblk *njnewblk;
5629	struct jnewblk *jnewblk;
5630
5631	/* Handle NULLs to simplify callers. */
5632	if (new == NULL)
5633		return (old);
5634	if (old == NULL)
5635		return (new);
5636	/* Replace a jfreefrag with a jnewblk. */
5637	if (new->wk_type == D_JFREEFRAG) {
5638		if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5639			panic("jnewblk_merge: blkno mismatch: %p, %p",
5640			    old, new);
5641		cancel_jfreefrag(WK_JFREEFRAG(new));
5642		return (old);
5643	}
5644	if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5645		panic("jnewblk_merge: Bad type: old %d new %d\n",
5646		    old->wk_type, new->wk_type);
5647	/*
5648	 * Handle merging of two jnewblk records that describe
5649	 * different sets of fragments in the same block.
5650	 */
5651	jnewblk = WK_JNEWBLK(old);
5652	njnewblk = WK_JNEWBLK(new);
5653	if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5654		panic("jnewblk_merge: Merging disparate blocks.");
5655	/*
5656	 * The record may be rolled back in the cg.
5657	 */
5658	if (jnewblk->jn_state & UNDONE) {
5659		jnewblk->jn_state &= ~UNDONE;
5660		njnewblk->jn_state |= UNDONE;
5661		njnewblk->jn_state &= ~ATTACHED;
5662	}
5663	/*
5664	 * We modify the newer addref and free the older so that if neither
5665	 * has been written the most up-to-date copy will be on disk.  If
5666	 * both have been written but rolled back we only temporarily need
5667	 * one of them to fix the bits when the cg write completes.
5668	 */
5669	jnewblk->jn_state |= ATTACHED | COMPLETE;
5670	njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5671	cancel_jnewblk(jnewblk, wkhd);
5672	WORKLIST_REMOVE(&jnewblk->jn_list);
5673	free_jnewblk(jnewblk);
5674	return (new);
5675}
5676
5677/*
5678 * Replace an old allocdirect dependency with a newer one.
5679 */
5680static void
5681allocdirect_merge(
5682	struct allocdirectlst *adphead,	/* head of list holding allocdirects */
5683	struct allocdirect *newadp,	/* allocdirect being added */
5684	struct allocdirect *oldadp)	/* existing allocdirect being checked */
5685{
5686	struct worklist *wk;
5687	struct freefrag *freefrag;
5688
5689	freefrag = NULL;
5690	LOCK_OWNED(VFSTOUFS(newadp->ad_list.wk_mp));
5691	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5692	    newadp->ad_oldsize != oldadp->ad_newsize ||
5693	    newadp->ad_offset >= UFS_NDADDR)
5694		panic("%s %jd != new %jd || old size %ld != new %ld",
5695		    "allocdirect_merge: old blkno",
5696		    (intmax_t)newadp->ad_oldblkno,
5697		    (intmax_t)oldadp->ad_newblkno,
5698		    newadp->ad_oldsize, oldadp->ad_newsize);
5699	newadp->ad_oldblkno = oldadp->ad_oldblkno;
5700	newadp->ad_oldsize = oldadp->ad_oldsize;
5701	/*
5702	 * If the old dependency had a fragment to free or had never
5703	 * previously had a block allocated, then the new dependency
5704	 * can immediately post its freefrag and adopt the old freefrag.
5705	 * This action is done by swapping the freefrag dependencies.
5706	 * The new dependency gains the old one's freefrag, and the
5707	 * old one gets the new one and then immediately puts it on
5708	 * the worklist when it is freed by free_newblk. It is
5709	 * not possible to do this swap when the old dependency had a
5710	 * non-zero size but no previous fragment to free. This condition
5711	 * arises when the new block is an extension of the old block.
5712	 * Here, the first part of the fragment allocated to the new
5713	 * dependency is part of the block currently claimed on disk by
5714	 * the old dependency, so cannot legitimately be freed until the
5715	 * conditions for the new dependency are fulfilled.
5716	 */
5717	freefrag = newadp->ad_freefrag;
5718	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5719		newadp->ad_freefrag = oldadp->ad_freefrag;
5720		oldadp->ad_freefrag = freefrag;
5721	}
5722	/*
5723	 * If we are tracking a new directory-block allocation,
5724	 * move it from the old allocdirect to the new allocdirect.
5725	 */
5726	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5727		WORKLIST_REMOVE(wk);
5728		if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5729			panic("allocdirect_merge: extra newdirblk");
5730		WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5731	}
5732	TAILQ_REMOVE(adphead, oldadp, ad_next);
5733	/*
5734	 * We need to move any journal dependencies over to the freefrag
5735	 * that releases this block if it exists.  Otherwise we are
5736	 * extending an existing block and we'll wait until that is
5737	 * complete to release the journal space and extend the
5738	 * new journal to cover this old space as well.
5739	 */
5740	if (freefrag == NULL) {
5741		if (oldadp->ad_newblkno != newadp->ad_newblkno)
5742			panic("allocdirect_merge: %jd != %jd",
5743			    oldadp->ad_newblkno, newadp->ad_newblkno);
5744		newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5745		    jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5746		    &oldadp->ad_block.nb_jnewblk->jn_list,
5747		    &newadp->ad_block.nb_jwork);
5748		oldadp->ad_block.nb_jnewblk = NULL;
5749		cancel_newblk(&oldadp->ad_block, NULL,
5750		    &newadp->ad_block.nb_jwork);
5751	} else {
5752		wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5753		    &freefrag->ff_list, &freefrag->ff_jwork);
5754		freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5755		    &freefrag->ff_jwork);
5756	}
5757	free_newblk(&oldadp->ad_block);
5758}
5759
5760/*
5761 * Allocate a jfreefrag structure to journal a single block free.
5762 */
5763static struct jfreefrag *
5764newjfreefrag(struct freefrag *freefrag,
5765	struct inode *ip,
5766	ufs2_daddr_t blkno,
5767	long size,
5768	ufs_lbn_t lbn)
5769{
5770	struct jfreefrag *jfreefrag;
5771	struct fs *fs;
5772
5773	fs = ITOFS(ip);
5774	jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5775	    M_SOFTDEP_FLAGS);
5776	workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, ITOVFS(ip));
5777	jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5778	jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5779	jfreefrag->fr_ino = ip->i_number;
5780	jfreefrag->fr_lbn = lbn;
5781	jfreefrag->fr_blkno = blkno;
5782	jfreefrag->fr_frags = numfrags(fs, size);
5783	jfreefrag->fr_freefrag = freefrag;
5784
5785	return (jfreefrag);
5786}
5787
5788/*
5789 * Allocate a new freefrag structure.
5790 */
5791static struct freefrag *
5792newfreefrag(struct inode *ip,
5793	ufs2_daddr_t blkno,
5794	long size,
5795	ufs_lbn_t lbn,
5796	uint64_t key)
5797{
5798	struct freefrag *freefrag;
5799	struct ufsmount *ump;
5800	struct fs *fs;
5801
5802	CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5803	    ip->i_number, blkno, size, lbn);
5804	ump = ITOUMP(ip);
5805	fs = ump->um_fs;
5806	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5807		panic("newfreefrag: frag size");
5808	freefrag = malloc(sizeof(struct freefrag),
5809	    M_FREEFRAG, M_SOFTDEP_FLAGS);
5810	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ump));
5811	freefrag->ff_state = ATTACHED;
5812	LIST_INIT(&freefrag->ff_jwork);
5813	freefrag->ff_inum = ip->i_number;
5814	freefrag->ff_vtype = ITOV(ip)->v_type;
5815	freefrag->ff_blkno = blkno;
5816	freefrag->ff_fragsize = size;
5817	freefrag->ff_key = key;
5818
5819	if (MOUNTEDSUJ(UFSTOVFS(ump))) {
5820		freefrag->ff_jdep = (struct worklist *)
5821		    newjfreefrag(freefrag, ip, blkno, size, lbn);
5822	} else {
5823		freefrag->ff_state |= DEPCOMPLETE;
5824		freefrag->ff_jdep = NULL;
5825	}
5826
5827	return (freefrag);
5828}
5829
5830/*
5831 * This workitem de-allocates fragments that were replaced during
5832 * file block allocation.
5833 */
5834static void
5835handle_workitem_freefrag(struct freefrag *freefrag)
5836{
5837	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5838	struct workhead wkhd;
5839
5840	CTR3(KTR_SUJ,
5841	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5842	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5843	/*
5844	 * It would be illegal to add new completion items to the
5845	 * freefrag after it was schedule to be done so it must be
5846	 * safe to modify the list head here.
5847	 */
5848	LIST_INIT(&wkhd);
5849	ACQUIRE_LOCK(ump);
5850	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5851	/*
5852	 * If the journal has not been written we must cancel it here.
5853	 */
5854	if (freefrag->ff_jdep) {
5855		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5856			panic("handle_workitem_freefrag: Unexpected type %d\n",
5857			    freefrag->ff_jdep->wk_type);
5858		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5859	}
5860	FREE_LOCK(ump);
5861	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5862	   freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype,
5863	   &wkhd, freefrag->ff_key);
5864	ACQUIRE_LOCK(ump);
5865	WORKITEM_FREE(freefrag, D_FREEFRAG);
5866	FREE_LOCK(ump);
5867}
5868
5869/*
5870 * Set up a dependency structure for an external attributes data block.
5871 * This routine follows much of the structure of softdep_setup_allocdirect.
5872 * See the description of softdep_setup_allocdirect above for details.
5873 */
5874void
5875softdep_setup_allocext(
5876	struct inode *ip,
5877	ufs_lbn_t off,
5878	ufs2_daddr_t newblkno,
5879	ufs2_daddr_t oldblkno,
5880	long newsize,
5881	long oldsize,
5882	struct buf *bp)
5883{
5884	struct allocdirect *adp, *oldadp;
5885	struct allocdirectlst *adphead;
5886	struct freefrag *freefrag;
5887	struct inodedep *inodedep;
5888	struct jnewblk *jnewblk;
5889	struct newblk *newblk;
5890	struct mount *mp;
5891	struct ufsmount *ump;
5892	ufs_lbn_t lbn;
5893
5894	mp = ITOVFS(ip);
5895	ump = VFSTOUFS(mp);
5896	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5897	    ("softdep_setup_allocext called on non-softdep filesystem"));
5898	KASSERT(off < UFS_NXADDR,
5899	    ("softdep_setup_allocext: lbn %lld > UFS_NXADDR", (long long)off));
5900
5901	lbn = bp->b_lblkno;
5902	if (oldblkno && oldblkno != newblkno)
5903		/*
5904		 * The usual case is that a smaller fragment that
5905		 * was just allocated has been replaced with a bigger
5906		 * fragment or a full-size block. If it is marked as
5907		 * B_DELWRI, the current contents have not been written
5908		 * to disk. It is possible that the block was written
5909		 * earlier, but very uncommon. If the block has never
5910		 * been written, there is no need to send a BIO_DELETE
5911		 * for it when it is freed. The gain from avoiding the
5912		 * TRIMs for the common case of unwritten blocks far
5913		 * exceeds the cost of the write amplification for the
5914		 * uncommon case of failing to send a TRIM for a block
5915		 * that had been written.
5916		 */
5917		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn,
5918		    (bp->b_flags & B_DELWRI) != 0 ? NOTRIM_KEY : SINGLETON_KEY);
5919	else
5920		freefrag = NULL;
5921
5922	ACQUIRE_LOCK(ump);
5923	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5924		panic("softdep_setup_allocext: lost block");
5925	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5926	    ("softdep_setup_allocext: newblk already initialized"));
5927	/*
5928	 * Convert the newblk to an allocdirect.
5929	 */
5930	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5931	adp = (struct allocdirect *)newblk;
5932	newblk->nb_freefrag = freefrag;
5933	adp->ad_offset = off;
5934	adp->ad_oldblkno = oldblkno;
5935	adp->ad_newsize = newsize;
5936	adp->ad_oldsize = oldsize;
5937	adp->ad_state |=  EXTDATA;
5938
5939	/*
5940	 * Finish initializing the journal.
5941	 */
5942	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5943		jnewblk->jn_ino = ip->i_number;
5944		jnewblk->jn_lbn = lbn;
5945		add_to_journal(&jnewblk->jn_list);
5946	}
5947	if (freefrag && freefrag->ff_jdep != NULL &&
5948	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5949		add_to_journal(freefrag->ff_jdep);
5950	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
5951	adp->ad_inodedep = inodedep;
5952
5953	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5954	/*
5955	 * The list of allocdirects must be kept in sorted and ascending
5956	 * order so that the rollback routines can quickly determine the
5957	 * first uncommitted block (the size of the file stored on disk
5958	 * ends at the end of the lowest committed fragment, or if there
5959	 * are no fragments, at the end of the highest committed block).
5960	 * Since files generally grow, the typical case is that the new
5961	 * block is to be added at the end of the list. We speed this
5962	 * special case by checking against the last allocdirect in the
5963	 * list before laboriously traversing the list looking for the
5964	 * insertion point.
5965	 */
5966	adphead = &inodedep->id_newextupdt;
5967	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5968	if (oldadp == NULL || oldadp->ad_offset <= off) {
5969		/* insert at end of list */
5970		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5971		if (oldadp != NULL && oldadp->ad_offset == off)
5972			allocdirect_merge(adphead, adp, oldadp);
5973		FREE_LOCK(ump);
5974		return;
5975	}
5976	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5977		if (oldadp->ad_offset >= off)
5978			break;
5979	}
5980	if (oldadp == NULL)
5981		panic("softdep_setup_allocext: lost entry");
5982	/* insert in middle of list */
5983	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5984	if (oldadp->ad_offset == off)
5985		allocdirect_merge(adphead, adp, oldadp);
5986	FREE_LOCK(ump);
5987}
5988
5989/*
5990 * Indirect block allocation dependencies.
5991 *
5992 * The same dependencies that exist for a direct block also exist when
5993 * a new block is allocated and pointed to by an entry in a block of
5994 * indirect pointers. The undo/redo states described above are also
5995 * used here. Because an indirect block contains many pointers that
5996 * may have dependencies, a second copy of the entire in-memory indirect
5997 * block is kept. The buffer cache copy is always completely up-to-date.
5998 * The second copy, which is used only as a source for disk writes,
5999 * contains only the safe pointers (i.e., those that have no remaining
6000 * update dependencies). The second copy is freed when all pointers
6001 * are safe. The cache is not allowed to replace indirect blocks with
6002 * pending update dependencies. If a buffer containing an indirect
6003 * block with dependencies is written, these routines will mark it
6004 * dirty again. It can only be successfully written once all the
6005 * dependencies are removed. The ffs_fsync routine in conjunction with
6006 * softdep_sync_metadata work together to get all the dependencies
6007 * removed so that a file can be successfully written to disk. Three
6008 * procedures are used when setting up indirect block pointer
6009 * dependencies. The division is necessary because of the organization
6010 * of the "balloc" routine and because of the distinction between file
6011 * pages and file metadata blocks.
6012 */
6013
6014/*
6015 * Allocate a new allocindir structure.
6016 */
6017static struct allocindir *
6018newallocindir(
6019	struct inode *ip,	/* inode for file being extended */
6020	int ptrno,		/* offset of pointer in indirect block */
6021	ufs2_daddr_t newblkno,	/* disk block number being added */
6022	ufs2_daddr_t oldblkno,	/* previous block number, 0 if none */
6023	ufs_lbn_t lbn)
6024{
6025	struct newblk *newblk;
6026	struct allocindir *aip;
6027	struct freefrag *freefrag;
6028	struct jnewblk *jnewblk;
6029
6030	if (oldblkno)
6031		freefrag = newfreefrag(ip, oldblkno, ITOFS(ip)->fs_bsize, lbn,
6032		    SINGLETON_KEY);
6033	else
6034		freefrag = NULL;
6035	ACQUIRE_LOCK(ITOUMP(ip));
6036	if (newblk_lookup(ITOVFS(ip), newblkno, 0, &newblk) == 0)
6037		panic("new_allocindir: lost block");
6038	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
6039	    ("newallocindir: newblk already initialized"));
6040	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
6041	newblk->nb_freefrag = freefrag;
6042	aip = (struct allocindir *)newblk;
6043	aip->ai_offset = ptrno;
6044	aip->ai_oldblkno = oldblkno;
6045	aip->ai_lbn = lbn;
6046	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
6047		jnewblk->jn_ino = ip->i_number;
6048		jnewblk->jn_lbn = lbn;
6049		add_to_journal(&jnewblk->jn_list);
6050	}
6051	if (freefrag && freefrag->ff_jdep != NULL &&
6052	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
6053		add_to_journal(freefrag->ff_jdep);
6054	return (aip);
6055}
6056
6057/*
6058 * Called just before setting an indirect block pointer
6059 * to a newly allocated file page.
6060 */
6061void
6062softdep_setup_allocindir_page(
6063	struct inode *ip,	/* inode for file being extended */
6064	ufs_lbn_t lbn,		/* allocated block number within file */
6065	struct buf *bp,		/* buffer with indirect blk referencing page */
6066	int ptrno,		/* offset of pointer in indirect block */
6067	ufs2_daddr_t newblkno,	/* disk block number being added */
6068	ufs2_daddr_t oldblkno,	/* previous block number, 0 if none */
6069	struct buf *nbp)	/* buffer holding allocated page */
6070{
6071	struct inodedep *inodedep;
6072	struct freefrag *freefrag;
6073	struct allocindir *aip;
6074	struct pagedep *pagedep;
6075	struct mount *mp;
6076	struct ufsmount *ump;
6077
6078	mp = ITOVFS(ip);
6079	ump = VFSTOUFS(mp);
6080	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6081	    ("softdep_setup_allocindir_page called on non-softdep filesystem"));
6082	KASSERT(lbn == nbp->b_lblkno,
6083	    ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
6084	    lbn, bp->b_lblkno));
6085	CTR4(KTR_SUJ,
6086	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
6087	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
6088	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
6089	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
6090	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6091	/*
6092	 * If we are allocating a directory page, then we must
6093	 * allocate an associated pagedep to track additions and
6094	 * deletions.
6095	 */
6096	if ((ip->i_mode & IFMT) == IFDIR)
6097		pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
6098	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
6099	freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
6100	FREE_LOCK(ump);
6101	if (freefrag)
6102		handle_workitem_freefrag(freefrag);
6103}
6104
6105/*
6106 * Called just before setting an indirect block pointer to a
6107 * newly allocated indirect block.
6108 */
6109void
6110softdep_setup_allocindir_meta(
6111	struct buf *nbp,	/* newly allocated indirect block */
6112	struct inode *ip,	/* inode for file being extended */
6113	struct buf *bp,		/* indirect block referencing allocated block */
6114	int ptrno,		/* offset of pointer in indirect block */
6115	ufs2_daddr_t newblkno)	/* disk block number being added */
6116{
6117	struct inodedep *inodedep;
6118	struct allocindir *aip;
6119	struct ufsmount *ump;
6120	ufs_lbn_t lbn;
6121
6122	ump = ITOUMP(ip);
6123	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
6124	    ("softdep_setup_allocindir_meta called on non-softdep filesystem"));
6125	CTR3(KTR_SUJ,
6126	    "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
6127	    ip->i_number, newblkno, ptrno);
6128	lbn = nbp->b_lblkno;
6129	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
6130	aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
6131	inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep);
6132	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
6133	if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
6134		panic("softdep_setup_allocindir_meta: Block already existed");
6135	FREE_LOCK(ump);
6136}
6137
6138static void
6139indirdep_complete(struct indirdep *indirdep)
6140{
6141	struct allocindir *aip;
6142
6143	LIST_REMOVE(indirdep, ir_next);
6144	indirdep->ir_state |= DEPCOMPLETE;
6145
6146	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
6147		LIST_REMOVE(aip, ai_next);
6148		free_newblk(&aip->ai_block);
6149	}
6150	/*
6151	 * If this indirdep is not attached to a buf it was simply waiting
6152	 * on completion to clear completehd.  free_indirdep() asserts
6153	 * that nothing is dangling.
6154	 */
6155	if ((indirdep->ir_state & ONWORKLIST) == 0)
6156		free_indirdep(indirdep);
6157}
6158
6159static struct indirdep *
6160indirdep_lookup(struct mount *mp,
6161	struct inode *ip,
6162	struct buf *bp)
6163{
6164	struct indirdep *indirdep, *newindirdep;
6165	struct newblk *newblk;
6166	struct ufsmount *ump;
6167	struct worklist *wk;
6168	struct fs *fs;
6169	ufs2_daddr_t blkno;
6170
6171	ump = VFSTOUFS(mp);
6172	LOCK_OWNED(ump);
6173	indirdep = NULL;
6174	newindirdep = NULL;
6175	fs = ump->um_fs;
6176	for (;;) {
6177		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
6178			if (wk->wk_type != D_INDIRDEP)
6179				continue;
6180			indirdep = WK_INDIRDEP(wk);
6181			break;
6182		}
6183		/* Found on the buffer worklist, no new structure to free. */
6184		if (indirdep != NULL && newindirdep == NULL)
6185			return (indirdep);
6186		if (indirdep != NULL && newindirdep != NULL)
6187			panic("indirdep_lookup: simultaneous create");
6188		/* None found on the buffer and a new structure is ready. */
6189		if (indirdep == NULL && newindirdep != NULL)
6190			break;
6191		/* None found and no new structure available. */
6192		FREE_LOCK(ump);
6193		newindirdep = malloc(sizeof(struct indirdep),
6194		    M_INDIRDEP, M_SOFTDEP_FLAGS);
6195		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
6196		newindirdep->ir_state = ATTACHED;
6197		if (I_IS_UFS1(ip))
6198			newindirdep->ir_state |= UFS1FMT;
6199		TAILQ_INIT(&newindirdep->ir_trunc);
6200		newindirdep->ir_saveddata = NULL;
6201		LIST_INIT(&newindirdep->ir_deplisthd);
6202		LIST_INIT(&newindirdep->ir_donehd);
6203		LIST_INIT(&newindirdep->ir_writehd);
6204		LIST_INIT(&newindirdep->ir_completehd);
6205		if (bp->b_blkno == bp->b_lblkno) {
6206			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
6207			    NULL, NULL);
6208			bp->b_blkno = blkno;
6209		}
6210		newindirdep->ir_freeblks = NULL;
6211		newindirdep->ir_savebp =
6212		    getblk(ump->um_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
6213		newindirdep->ir_bp = bp;
6214		BUF_KERNPROC(newindirdep->ir_savebp);
6215		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
6216		ACQUIRE_LOCK(ump);
6217	}
6218	indirdep = newindirdep;
6219	WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
6220	/*
6221	 * If the block is not yet allocated we don't set DEPCOMPLETE so
6222	 * that we don't free dependencies until the pointers are valid.
6223	 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
6224	 * than using the hash.
6225	 */
6226	if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
6227		LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
6228	else
6229		indirdep->ir_state |= DEPCOMPLETE;
6230	return (indirdep);
6231}
6232
6233/*
6234 * Called to finish the allocation of the "aip" allocated
6235 * by one of the two routines above.
6236 */
6237static struct freefrag *
6238setup_allocindir_phase2(
6239	struct buf *bp,		/* in-memory copy of the indirect block */
6240	struct inode *ip,	/* inode for file being extended */
6241	struct inodedep *inodedep, /* Inodedep for ip */
6242	struct allocindir *aip,	/* allocindir allocated by the above routines */
6243	ufs_lbn_t lbn)		/* Logical block number for this block. */
6244{
6245	struct fs *fs __diagused;
6246	struct indirdep *indirdep;
6247	struct allocindir *oldaip;
6248	struct freefrag *freefrag;
6249	struct mount *mp;
6250	struct ufsmount *ump;
6251
6252	mp = ITOVFS(ip);
6253	ump = VFSTOUFS(mp);
6254	LOCK_OWNED(ump);
6255	fs = ump->um_fs;
6256	if (bp->b_lblkno >= 0)
6257		panic("setup_allocindir_phase2: not indir blk");
6258	KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
6259	    ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
6260	indirdep = indirdep_lookup(mp, ip, bp);
6261	KASSERT(indirdep->ir_savebp != NULL,
6262	    ("setup_allocindir_phase2 NULL ir_savebp"));
6263	aip->ai_indirdep = indirdep;
6264	/*
6265	 * Check for an unwritten dependency for this indirect offset.  If
6266	 * there is, merge the old dependency into the new one.  This happens
6267	 * as a result of reallocblk only.
6268	 */
6269	freefrag = NULL;
6270	if (aip->ai_oldblkno != 0) {
6271		LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
6272			if (oldaip->ai_offset == aip->ai_offset) {
6273				freefrag = allocindir_merge(aip, oldaip);
6274				goto done;
6275			}
6276		}
6277		LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
6278			if (oldaip->ai_offset == aip->ai_offset) {
6279				freefrag = allocindir_merge(aip, oldaip);
6280				goto done;
6281			}
6282		}
6283	}
6284done:
6285	LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
6286	return (freefrag);
6287}
6288
6289/*
6290 * Merge two allocindirs which refer to the same block.  Move newblock
6291 * dependencies and setup the freefrags appropriately.
6292 */
6293static struct freefrag *
6294allocindir_merge(
6295	struct allocindir *aip,
6296	struct allocindir *oldaip)
6297{
6298	struct freefrag *freefrag;
6299	struct worklist *wk;
6300
6301	if (oldaip->ai_newblkno != aip->ai_oldblkno)
6302		panic("allocindir_merge: blkno");
6303	aip->ai_oldblkno = oldaip->ai_oldblkno;
6304	freefrag = aip->ai_freefrag;
6305	aip->ai_freefrag = oldaip->ai_freefrag;
6306	oldaip->ai_freefrag = NULL;
6307	KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag"));
6308	/*
6309	 * If we are tracking a new directory-block allocation,
6310	 * move it from the old allocindir to the new allocindir.
6311	 */
6312	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
6313		WORKLIST_REMOVE(wk);
6314		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
6315			panic("allocindir_merge: extra newdirblk");
6316		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
6317	}
6318	/*
6319	 * We can skip journaling for this freefrag and just complete
6320	 * any pending journal work for the allocindir that is being
6321	 * removed after the freefrag completes.
6322	 */
6323	if (freefrag->ff_jdep)
6324		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
6325	LIST_REMOVE(oldaip, ai_next);
6326	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
6327	    &freefrag->ff_list, &freefrag->ff_jwork);
6328	free_newblk(&oldaip->ai_block);
6329
6330	return (freefrag);
6331}
6332
6333static inline void
6334setup_freedirect(
6335	struct freeblks *freeblks,
6336	struct inode *ip,
6337	int i,
6338	int needj)
6339{
6340	struct ufsmount *ump;
6341	ufs2_daddr_t blkno;
6342	int frags;
6343
6344	blkno = DIP(ip, i_db[i]);
6345	if (blkno == 0)
6346		return;
6347	DIP_SET(ip, i_db[i], 0);
6348	ump = ITOUMP(ip);
6349	frags = sblksize(ump->um_fs, ip->i_size, i);
6350	frags = numfrags(ump->um_fs, frags);
6351	newfreework(ump, freeblks, NULL, i, blkno, frags, 0, needj);
6352}
6353
6354static inline void
6355setup_freeext(
6356	struct freeblks *freeblks,
6357	struct inode *ip,
6358	int i,
6359	int needj)
6360{
6361	struct ufsmount *ump;
6362	ufs2_daddr_t blkno;
6363	int frags;
6364
6365	blkno = ip->i_din2->di_extb[i];
6366	if (blkno == 0)
6367		return;
6368	ip->i_din2->di_extb[i] = 0;
6369	ump = ITOUMP(ip);
6370	frags = sblksize(ump->um_fs, ip->i_din2->di_extsize, i);
6371	frags = numfrags(ump->um_fs, frags);
6372	newfreework(ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
6373}
6374
6375static inline void
6376setup_freeindir(
6377	struct freeblks *freeblks,
6378	struct inode *ip,
6379	int i,
6380	ufs_lbn_t lbn,
6381	int needj)
6382{
6383	struct ufsmount *ump;
6384	ufs2_daddr_t blkno;
6385
6386	blkno = DIP(ip, i_ib[i]);
6387	if (blkno == 0)
6388		return;
6389	DIP_SET(ip, i_ib[i], 0);
6390	ump = ITOUMP(ip);
6391	newfreework(ump, freeblks, NULL, lbn, blkno, ump->um_fs->fs_frag,
6392	    0, needj);
6393}
6394
6395static inline struct freeblks *
6396newfreeblks(struct mount *mp, struct inode *ip)
6397{
6398	struct freeblks *freeblks;
6399
6400	freeblks = malloc(sizeof(struct freeblks),
6401		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
6402	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
6403	LIST_INIT(&freeblks->fb_jblkdephd);
6404	LIST_INIT(&freeblks->fb_jwork);
6405	freeblks->fb_ref = 0;
6406	freeblks->fb_cgwait = 0;
6407	freeblks->fb_state = ATTACHED;
6408	freeblks->fb_uid = ip->i_uid;
6409	freeblks->fb_inum = ip->i_number;
6410	freeblks->fb_vtype = ITOV(ip)->v_type;
6411	freeblks->fb_modrev = DIP(ip, i_modrev);
6412	freeblks->fb_devvp = ITODEVVP(ip);
6413	freeblks->fb_chkcnt = 0;
6414	freeblks->fb_len = 0;
6415
6416	return (freeblks);
6417}
6418
6419static void
6420trunc_indirdep(
6421	struct indirdep *indirdep,
6422	struct freeblks *freeblks,
6423	struct buf *bp,
6424	int off)
6425{
6426	struct allocindir *aip, *aipn;
6427
6428	/*
6429	 * The first set of allocindirs won't be in savedbp.
6430	 */
6431	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6432		if (aip->ai_offset > off)
6433			cancel_allocindir(aip, bp, freeblks, 1);
6434	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6435		if (aip->ai_offset > off)
6436			cancel_allocindir(aip, bp, freeblks, 1);
6437	/*
6438	 * These will exist in savedbp.
6439	 */
6440	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6441		if (aip->ai_offset > off)
6442			cancel_allocindir(aip, NULL, freeblks, 0);
6443	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6444		if (aip->ai_offset > off)
6445			cancel_allocindir(aip, NULL, freeblks, 0);
6446}
6447
6448/*
6449 * Follow the chain of indirects down to lastlbn creating a freework
6450 * structure for each.  This will be used to start indir_trunc() at
6451 * the right offset and create the journal records for the parrtial
6452 * truncation.  A second step will handle the truncated dependencies.
6453 */
6454static int
6455setup_trunc_indir(
6456	struct freeblks *freeblks,
6457	struct inode *ip,
6458	ufs_lbn_t lbn,
6459	ufs_lbn_t lastlbn,
6460	ufs2_daddr_t blkno)
6461{
6462	struct indirdep *indirdep;
6463	struct indirdep *indirn;
6464	struct freework *freework;
6465	struct newblk *newblk;
6466	struct mount *mp;
6467	struct ufsmount *ump;
6468	struct buf *bp;
6469	uint8_t *start;
6470	uint8_t *end;
6471	ufs_lbn_t lbnadd;
6472	int level;
6473	int error;
6474	int off;
6475
6476	freework = NULL;
6477	if (blkno == 0)
6478		return (0);
6479	mp = freeblks->fb_list.wk_mp;
6480	ump = VFSTOUFS(mp);
6481	/*
6482	 * Here, calls to VOP_BMAP() will fail.  However, we already have
6483	 * the on-disk address, so we just pass it to bread() instead of
6484	 * having bread() attempt to calculate it using VOP_BMAP().
6485	 */
6486	error = ffs_breadz(ump, ITOV(ip), lbn, blkptrtodb(ump, blkno),
6487	    (int)mp->mnt_stat.f_iosize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
6488	if (error)
6489		return (error);
6490	level = lbn_level(lbn);
6491	lbnadd = lbn_offset(ump->um_fs, level);
6492	/*
6493	 * Compute the offset of the last block we want to keep.  Store
6494	 * in the freework the first block we want to completely free.
6495	 */
6496	off = (lastlbn - -(lbn + level)) / lbnadd;
6497	if (off + 1 == NINDIR(ump->um_fs))
6498		goto nowork;
6499	freework = newfreework(ump, freeblks, NULL, lbn, blkno, 0, off + 1, 0);
6500	/*
6501	 * Link the freework into the indirdep.  This will prevent any new
6502	 * allocations from proceeding until we are finished with the
6503	 * truncate and the block is written.
6504	 */
6505	ACQUIRE_LOCK(ump);
6506	indirdep = indirdep_lookup(mp, ip, bp);
6507	if (indirdep->ir_freeblks)
6508		panic("setup_trunc_indir: indirdep already truncated.");
6509	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6510	freework->fw_indir = indirdep;
6511	/*
6512	 * Cancel any allocindirs that will not make it to disk.
6513	 * We have to do this for all copies of the indirdep that
6514	 * live on this newblk.
6515	 */
6516	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6517		if (newblk_lookup(mp, dbtofsb(ump->um_fs, bp->b_blkno), 0,
6518		    &newblk) == 0)
6519			panic("setup_trunc_indir: lost block");
6520		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6521			trunc_indirdep(indirn, freeblks, bp, off);
6522	} else
6523		trunc_indirdep(indirdep, freeblks, bp, off);
6524	FREE_LOCK(ump);
6525	/*
6526	 * Creation is protected by the buf lock. The saveddata is only
6527	 * needed if a full truncation follows a partial truncation but it
6528	 * is difficult to allocate in that case so we fetch it anyway.
6529	 */
6530	if (indirdep->ir_saveddata == NULL)
6531		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6532		    M_SOFTDEP_FLAGS);
6533nowork:
6534	/* Fetch the blkno of the child and the zero start offset. */
6535	if (I_IS_UFS1(ip)) {
6536		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6537		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6538	} else {
6539		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6540		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6541	}
6542	if (freework) {
6543		/* Zero the truncated pointers. */
6544		end = bp->b_data + bp->b_bcount;
6545		bzero(start, end - start);
6546		bdwrite(bp);
6547	} else
6548		bqrelse(bp);
6549	if (level == 0)
6550		return (0);
6551	lbn++; /* adjust level */
6552	lbn -= (off * lbnadd);
6553	return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
6554}
6555
6556/*
6557 * Complete the partial truncation of an indirect block setup by
6558 * setup_trunc_indir().  This zeros the truncated pointers in the saved
6559 * copy and writes them to disk before the freeblks is allowed to complete.
6560 */
6561static void
6562complete_trunc_indir(struct freework *freework)
6563{
6564	struct freework *fwn;
6565	struct indirdep *indirdep;
6566	struct ufsmount *ump;
6567	struct buf *bp;
6568	uintptr_t start;
6569	int count;
6570
6571	ump = VFSTOUFS(freework->fw_list.wk_mp);
6572	LOCK_OWNED(ump);
6573	indirdep = freework->fw_indir;
6574	for (;;) {
6575		bp = indirdep->ir_bp;
6576		/* See if the block was discarded. */
6577		if (bp == NULL)
6578			break;
6579		/* Inline part of getdirtybuf().  We dont want bremfree. */
6580		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6581			break;
6582		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6583		    LOCK_PTR(ump)) == 0)
6584			BUF_UNLOCK(bp);
6585		ACQUIRE_LOCK(ump);
6586	}
6587	freework->fw_state |= DEPCOMPLETE;
6588	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6589	/*
6590	 * Zero the pointers in the saved copy.
6591	 */
6592	if (indirdep->ir_state & UFS1FMT)
6593		start = sizeof(ufs1_daddr_t);
6594	else
6595		start = sizeof(ufs2_daddr_t);
6596	start *= freework->fw_start;
6597	count = indirdep->ir_savebp->b_bcount - start;
6598	start += (uintptr_t)indirdep->ir_savebp->b_data;
6599	bzero((char *)start, count);
6600	/*
6601	 * We need to start the next truncation in the list if it has not
6602	 * been started yet.
6603	 */
6604	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6605	if (fwn != NULL) {
6606		if (fwn->fw_freeblks == indirdep->ir_freeblks)
6607			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6608		if ((fwn->fw_state & ONWORKLIST) == 0)
6609			freework_enqueue(fwn);
6610	}
6611	/*
6612	 * If bp is NULL the block was fully truncated, restore
6613	 * the saved block list otherwise free it if it is no
6614	 * longer needed.
6615	 */
6616	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6617		if (bp == NULL)
6618			bcopy(indirdep->ir_saveddata,
6619			    indirdep->ir_savebp->b_data,
6620			    indirdep->ir_savebp->b_bcount);
6621		free(indirdep->ir_saveddata, M_INDIRDEP);
6622		indirdep->ir_saveddata = NULL;
6623	}
6624	/*
6625	 * When bp is NULL there is a full truncation pending.  We
6626	 * must wait for this full truncation to be journaled before
6627	 * we can release this freework because the disk pointers will
6628	 * never be written as zero.
6629	 */
6630	if (bp == NULL)  {
6631		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6632			handle_written_freework(freework);
6633		else
6634			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6635			   &freework->fw_list);
6636		if (fwn == NULL) {
6637			freework->fw_indir = (void *)0x0000deadbeef0000;
6638			bp = indirdep->ir_savebp;
6639			indirdep->ir_savebp = NULL;
6640			free_indirdep(indirdep);
6641			FREE_LOCK(ump);
6642			brelse(bp);
6643			ACQUIRE_LOCK(ump);
6644		}
6645	} else {
6646		/* Complete when the real copy is written. */
6647		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6648		BUF_UNLOCK(bp);
6649	}
6650}
6651
6652/*
6653 * Calculate the number of blocks we are going to release where datablocks
6654 * is the current total and length is the new file size.
6655 */
6656static ufs2_daddr_t
6657blkcount(struct fs *fs,
6658	ufs2_daddr_t datablocks,
6659	off_t length)
6660{
6661	off_t totblks, numblks;
6662
6663	totblks = 0;
6664	numblks = howmany(length, fs->fs_bsize);
6665	if (numblks <= UFS_NDADDR) {
6666		totblks = howmany(length, fs->fs_fsize);
6667		goto out;
6668	}
6669        totblks = blkstofrags(fs, numblks);
6670	numblks -= UFS_NDADDR;
6671	/*
6672	 * Count all single, then double, then triple indirects required.
6673	 * Subtracting one indirects worth of blocks for each pass
6674	 * acknowledges one of each pointed to by the inode.
6675	 */
6676	for (;;) {
6677		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6678		numblks -= NINDIR(fs);
6679		if (numblks <= 0)
6680			break;
6681		numblks = howmany(numblks, NINDIR(fs));
6682	}
6683out:
6684	totblks = fsbtodb(fs, totblks);
6685	/*
6686	 * Handle sparse files.  We can't reclaim more blocks than the inode
6687	 * references.  We will correct it later in handle_complete_freeblks()
6688	 * when we know the real count.
6689	 */
6690	if (totblks > datablocks)
6691		return (0);
6692	return (datablocks - totblks);
6693}
6694
6695/*
6696 * Handle freeblocks for journaled softupdate filesystems.
6697 *
6698 * Contrary to normal softupdates, we must preserve the block pointers in
6699 * indirects until their subordinates are free.  This is to avoid journaling
6700 * every block that is freed which may consume more space than the journal
6701 * itself.  The recovery program will see the free block journals at the
6702 * base of the truncated area and traverse them to reclaim space.  The
6703 * pointers in the inode may be cleared immediately after the journal
6704 * records are written because each direct and indirect pointer in the
6705 * inode is recorded in a journal.  This permits full truncation to proceed
6706 * asynchronously.  The write order is journal -> inode -> cgs -> indirects.
6707 *
6708 * The algorithm is as follows:
6709 * 1) Traverse the in-memory state and create journal entries to release
6710 *    the relevant blocks and full indirect trees.
6711 * 2) Traverse the indirect block chain adding partial truncation freework
6712 *    records to indirects in the path to lastlbn.  The freework will
6713 *    prevent new allocation dependencies from being satisfied in this
6714 *    indirect until the truncation completes.
6715 * 3) Read and lock the inode block, performing an update with the new size
6716 *    and pointers.  This prevents truncated data from becoming valid on
6717 *    disk through step 4.
6718 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6719 *    eliminate journal work for those records that do not require it.
6720 * 5) Schedule the journal records to be written followed by the inode block.
6721 * 6) Allocate any necessary frags for the end of file.
6722 * 7) Zero any partially truncated blocks.
6723 *
6724 * From this truncation proceeds asynchronously using the freework and
6725 * indir_trunc machinery.  The file will not be extended again into a
6726 * partially truncated indirect block until all work is completed but
6727 * the normal dependency mechanism ensures that it is rolled back/forward
6728 * as appropriate.  Further truncation may occur without delay and is
6729 * serialized in indir_trunc().
6730 */
6731void
6732softdep_journal_freeblocks(
6733	struct inode *ip,	/* The inode whose length is to be reduced */
6734	struct ucred *cred,
6735	off_t length,		/* The new length for the file */
6736	int flags)		/* IO_EXT and/or IO_NORMAL */
6737{
6738	struct freeblks *freeblks, *fbn;
6739	struct worklist *wk, *wkn;
6740	struct inodedep *inodedep;
6741	struct jblkdep *jblkdep;
6742	struct allocdirect *adp, *adpn;
6743	struct ufsmount *ump;
6744	struct fs *fs;
6745	struct buf *bp;
6746	struct vnode *vp;
6747	struct mount *mp;
6748	daddr_t dbn;
6749	ufs2_daddr_t extblocks, datablocks;
6750	ufs_lbn_t tmpval, lbn, lastlbn;
6751	int frags, lastoff, iboff, allocblock, needj, error, i;
6752
6753	ump = ITOUMP(ip);
6754	mp = UFSTOVFS(ump);
6755	fs = ump->um_fs;
6756	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6757	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
6758	vp = ITOV(ip);
6759	needj = 1;
6760	iboff = -1;
6761	allocblock = 0;
6762	extblocks = 0;
6763	datablocks = 0;
6764	frags = 0;
6765	freeblks = newfreeblks(mp, ip);
6766	ACQUIRE_LOCK(ump);
6767	/*
6768	 * If we're truncating a removed file that will never be written
6769	 * we don't need to journal the block frees.  The canceled journals
6770	 * for the allocations will suffice.
6771	 */
6772	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6773	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6774	    length == 0)
6775		needj = 0;
6776	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6777	    ip->i_number, length, needj);
6778	FREE_LOCK(ump);
6779	/*
6780	 * Calculate the lbn that we are truncating to.  This results in -1
6781	 * if we're truncating the 0 bytes.  So it is the last lbn we want
6782	 * to keep, not the first lbn we want to truncate.
6783	 */
6784	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6785	lastoff = blkoff(fs, length);
6786	/*
6787	 * Compute frags we are keeping in lastlbn.  0 means all.
6788	 */
6789	if (lastlbn >= 0 && lastlbn < UFS_NDADDR) {
6790		frags = fragroundup(fs, lastoff);
6791		/* adp offset of last valid allocdirect. */
6792		iboff = lastlbn;
6793	} else if (lastlbn > 0)
6794		iboff = UFS_NDADDR;
6795	if (fs->fs_magic == FS_UFS2_MAGIC)
6796		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6797	/*
6798	 * Handle normal data blocks and indirects.  This section saves
6799	 * values used after the inode update to complete frag and indirect
6800	 * truncation.
6801	 */
6802	if ((flags & IO_NORMAL) != 0) {
6803		/*
6804		 * Handle truncation of whole direct and indirect blocks.
6805		 */
6806		for (i = iboff + 1; i < UFS_NDADDR; i++)
6807			setup_freedirect(freeblks, ip, i, needj);
6808		for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR;
6809		    i < UFS_NIADDR;
6810		    i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6811			/* Release a whole indirect tree. */
6812			if (lbn > lastlbn) {
6813				setup_freeindir(freeblks, ip, i, -lbn -i,
6814				    needj);
6815				continue;
6816			}
6817			iboff = i + UFS_NDADDR;
6818			/*
6819			 * Traverse partially truncated indirect tree.
6820			 */
6821			if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6822				setup_trunc_indir(freeblks, ip, -lbn - i,
6823				    lastlbn, DIP(ip, i_ib[i]));
6824		}
6825		/*
6826		 * Handle partial truncation to a frag boundary.
6827		 */
6828		if (frags) {
6829			ufs2_daddr_t blkno;
6830			long oldfrags;
6831
6832			oldfrags = blksize(fs, ip, lastlbn);
6833			blkno = DIP(ip, i_db[lastlbn]);
6834			if (blkno && oldfrags != frags) {
6835				oldfrags -= frags;
6836				oldfrags = numfrags(fs, oldfrags);
6837				blkno += numfrags(fs, frags);
6838				newfreework(ump, freeblks, NULL, lastlbn,
6839				    blkno, oldfrags, 0, needj);
6840				if (needj)
6841					adjust_newfreework(freeblks,
6842					    numfrags(fs, frags));
6843			} else if (blkno == 0)
6844				allocblock = 1;
6845		}
6846		/*
6847		 * Add a journal record for partial truncate if we are
6848		 * handling indirect blocks.  Non-indirects need no extra
6849		 * journaling.
6850		 */
6851		if (length != 0 && lastlbn >= UFS_NDADDR) {
6852			UFS_INODE_SET_FLAG(ip, IN_TRUNCATED);
6853			newjtrunc(freeblks, length, 0);
6854		}
6855		ip->i_size = length;
6856		DIP_SET(ip, i_size, ip->i_size);
6857		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
6858		datablocks = DIP(ip, i_blocks) - extblocks;
6859		if (length != 0)
6860			datablocks = blkcount(fs, datablocks, length);
6861		freeblks->fb_len = length;
6862	}
6863	if ((flags & IO_EXT) != 0) {
6864		for (i = 0; i < UFS_NXADDR; i++)
6865			setup_freeext(freeblks, ip, i, needj);
6866		ip->i_din2->di_extsize = 0;
6867		datablocks += extblocks;
6868		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
6869	}
6870#ifdef QUOTA
6871	/* Reference the quotas in case the block count is wrong in the end. */
6872	quotaref(vp, freeblks->fb_quota);
6873	(void) chkdq(ip, -datablocks, NOCRED, FORCE);
6874#endif
6875	freeblks->fb_chkcnt = -datablocks;
6876	UFS_LOCK(ump);
6877	fs->fs_pendingblocks += datablocks;
6878	UFS_UNLOCK(ump);
6879	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6880	/*
6881	 * Handle truncation of incomplete alloc direct dependencies.  We
6882	 * hold the inode block locked to prevent incomplete dependencies
6883	 * from reaching the disk while we are eliminating those that
6884	 * have been truncated.  This is a partially inlined ffs_update().
6885	 */
6886	ufs_itimes(vp);
6887	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6888	dbn = fsbtodb(fs, ino_to_fsba(fs, ip->i_number));
6889	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
6890	    NULL, NULL, 0, cred, 0, NULL, &bp);
6891	if (error) {
6892		softdep_error("softdep_journal_freeblocks", error);
6893		return;
6894	}
6895	if (bp->b_bufsize == fs->fs_bsize)
6896		bp->b_flags |= B_CLUSTEROK;
6897	softdep_update_inodeblock(ip, bp, 0);
6898	if (ump->um_fstype == UFS1) {
6899		*((struct ufs1_dinode *)bp->b_data +
6900		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6901	} else {
6902		ffs_update_dinode_ckhash(fs, ip->i_din2);
6903		*((struct ufs2_dinode *)bp->b_data +
6904		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6905	}
6906	ACQUIRE_LOCK(ump);
6907	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
6908	if ((inodedep->id_state & IOSTARTED) != 0)
6909		panic("softdep_setup_freeblocks: inode busy");
6910	/*
6911	 * Add the freeblks structure to the list of operations that
6912	 * must await the zero'ed inode being written to disk. If we
6913	 * still have a bitmap dependency (needj), then the inode
6914	 * has never been written to disk, so we can process the
6915	 * freeblks below once we have deleted the dependencies.
6916	 */
6917	if (needj)
6918		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6919	else
6920		freeblks->fb_state |= COMPLETE;
6921	if ((flags & IO_NORMAL) != 0) {
6922		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6923			if (adp->ad_offset > iboff)
6924				cancel_allocdirect(&inodedep->id_inoupdt, adp,
6925				    freeblks);
6926			/*
6927			 * Truncate the allocdirect.  We could eliminate
6928			 * or modify journal records as well.
6929			 */
6930			else if (adp->ad_offset == iboff && frags)
6931				adp->ad_newsize = frags;
6932		}
6933	}
6934	if ((flags & IO_EXT) != 0)
6935		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
6936			cancel_allocdirect(&inodedep->id_extupdt, adp,
6937			    freeblks);
6938	/*
6939	 * Scan the bufwait list for newblock dependencies that will never
6940	 * make it to disk.
6941	 */
6942	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6943		if (wk->wk_type != D_ALLOCDIRECT)
6944			continue;
6945		adp = WK_ALLOCDIRECT(wk);
6946		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6947		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6948			cancel_jfreeblk(freeblks, adp->ad_newblkno);
6949			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6950			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6951		}
6952	}
6953	/*
6954	 * Add journal work.
6955	 */
6956	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6957		add_to_journal(&jblkdep->jb_list);
6958	FREE_LOCK(ump);
6959	bdwrite(bp);
6960	/*
6961	 * Truncate dependency structures beyond length.
6962	 */
6963	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6964	/*
6965	 * This is only set when we need to allocate a fragment because
6966	 * none existed at the end of a frag-sized file.  It handles only
6967	 * allocating a new, zero filled block.
6968	 */
6969	if (allocblock) {
6970		ip->i_size = length - lastoff;
6971		DIP_SET(ip, i_size, ip->i_size);
6972		error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6973		if (error != 0) {
6974			softdep_error("softdep_journal_freeblks", error);
6975			return;
6976		}
6977		ip->i_size = length;
6978		DIP_SET(ip, i_size, length);
6979		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE | IN_UPDATE);
6980		allocbuf(bp, frags);
6981		ffs_update(vp, 0);
6982		bawrite(bp);
6983	} else if (lastoff != 0 && vp->v_type != VDIR) {
6984		int size;
6985
6986		/*
6987		 * Zero the end of a truncated frag or block.
6988		 */
6989		size = sblksize(fs, length, lastlbn);
6990		error = bread(vp, lastlbn, size, cred, &bp);
6991		if (error == 0) {
6992			bzero((char *)bp->b_data + lastoff, size - lastoff);
6993			bawrite(bp);
6994		} else if (!ffs_fsfail_cleanup(ump, error)) {
6995			softdep_error("softdep_journal_freeblks", error);
6996			return;
6997		}
6998	}
6999	ACQUIRE_LOCK(ump);
7000	inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
7001	TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
7002	freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
7003	/*
7004	 * We zero earlier truncations so they don't erroneously
7005	 * update i_blocks.
7006	 */
7007	if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
7008		TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
7009			fbn->fb_len = 0;
7010	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
7011	    LIST_EMPTY(&freeblks->fb_jblkdephd))
7012		freeblks->fb_state |= INPROGRESS;
7013	else
7014		freeblks = NULL;
7015	FREE_LOCK(ump);
7016	if (freeblks)
7017		handle_workitem_freeblocks(freeblks, 0);
7018	trunc_pages(ip, length, extblocks, flags);
7019
7020}
7021
7022/*
7023 * Flush a JOP_SYNC to the journal.
7024 */
7025void
7026softdep_journal_fsync(struct inode *ip)
7027{
7028	struct jfsync *jfsync;
7029	struct ufsmount *ump;
7030
7031	ump = ITOUMP(ip);
7032	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7033	    ("softdep_journal_fsync called on non-softdep filesystem"));
7034	if ((ip->i_flag & IN_TRUNCATED) == 0)
7035		return;
7036	ip->i_flag &= ~IN_TRUNCATED;
7037	jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
7038	workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ump));
7039	jfsync->jfs_size = ip->i_size;
7040	jfsync->jfs_ino = ip->i_number;
7041	ACQUIRE_LOCK(ump);
7042	add_to_journal(&jfsync->jfs_list);
7043	jwait(&jfsync->jfs_list, MNT_WAIT);
7044	FREE_LOCK(ump);
7045}
7046
7047/*
7048 * Block de-allocation dependencies.
7049 *
7050 * When blocks are de-allocated, the on-disk pointers must be nullified before
7051 * the blocks are made available for use by other files.  (The true
7052 * requirement is that old pointers must be nullified before new on-disk
7053 * pointers are set.  We chose this slightly more stringent requirement to
7054 * reduce complexity.) Our implementation handles this dependency by updating
7055 * the inode (or indirect block) appropriately but delaying the actual block
7056 * de-allocation (i.e., freemap and free space count manipulation) until
7057 * after the updated versions reach stable storage.  After the disk is
7058 * updated, the blocks can be safely de-allocated whenever it is convenient.
7059 * This implementation handles only the common case of reducing a file's
7060 * length to zero. Other cases are handled by the conventional synchronous
7061 * write approach.
7062 *
7063 * The ffs implementation with which we worked double-checks
7064 * the state of the block pointers and file size as it reduces
7065 * a file's length.  Some of this code is replicated here in our
7066 * soft updates implementation.  The freeblks->fb_chkcnt field is
7067 * used to transfer a part of this information to the procedure
7068 * that eventually de-allocates the blocks.
7069 *
7070 * This routine should be called from the routine that shortens
7071 * a file's length, before the inode's size or block pointers
7072 * are modified. It will save the block pointer information for
7073 * later release and zero the inode so that the calling routine
7074 * can release it.
7075 */
7076void
7077softdep_setup_freeblocks(
7078	struct inode *ip,	/* The inode whose length is to be reduced */
7079	off_t length,		/* The new length for the file */
7080	int flags)		/* IO_EXT and/or IO_NORMAL */
7081{
7082	struct ufs1_dinode *dp1;
7083	struct ufs2_dinode *dp2;
7084	struct freeblks *freeblks;
7085	struct inodedep *inodedep;
7086	struct allocdirect *adp;
7087	struct ufsmount *ump;
7088	struct buf *bp;
7089	struct fs *fs;
7090	ufs2_daddr_t extblocks, datablocks;
7091	struct mount *mp;
7092	int i, delay, error;
7093	ufs_lbn_t tmpval;
7094	ufs_lbn_t lbn;
7095
7096	ump = ITOUMP(ip);
7097	mp = UFSTOVFS(ump);
7098	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
7099	    ("softdep_setup_freeblocks called on non-softdep filesystem"));
7100	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
7101	    ip->i_number, length);
7102	KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
7103	fs = ump->um_fs;
7104	if ((error = bread(ump->um_devvp,
7105	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
7106	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
7107		if (!ffs_fsfail_cleanup(ump, error))
7108			softdep_error("softdep_setup_freeblocks", error);
7109		return;
7110	}
7111	freeblks = newfreeblks(mp, ip);
7112	extblocks = 0;
7113	datablocks = 0;
7114	if (fs->fs_magic == FS_UFS2_MAGIC)
7115		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
7116	if ((flags & IO_NORMAL) != 0) {
7117		for (i = 0; i < UFS_NDADDR; i++)
7118			setup_freedirect(freeblks, ip, i, 0);
7119		for (i = 0, tmpval = NINDIR(fs), lbn = UFS_NDADDR;
7120		    i < UFS_NIADDR;
7121		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
7122			setup_freeindir(freeblks, ip, i, -lbn -i, 0);
7123		ip->i_size = 0;
7124		DIP_SET(ip, i_size, 0);
7125		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
7126		datablocks = DIP(ip, i_blocks) - extblocks;
7127	}
7128	if ((flags & IO_EXT) != 0) {
7129		for (i = 0; i < UFS_NXADDR; i++)
7130			setup_freeext(freeblks, ip, i, 0);
7131		ip->i_din2->di_extsize = 0;
7132		datablocks += extblocks;
7133		UFS_INODE_SET_FLAG(ip, IN_SIZEMOD | IN_CHANGE);
7134	}
7135#ifdef QUOTA
7136	/* Reference the quotas in case the block count is wrong in the end. */
7137	quotaref(ITOV(ip), freeblks->fb_quota);
7138	(void) chkdq(ip, -datablocks, NOCRED, FORCE);
7139#endif
7140	freeblks->fb_chkcnt = -datablocks;
7141	UFS_LOCK(ump);
7142	fs->fs_pendingblocks += datablocks;
7143	UFS_UNLOCK(ump);
7144	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
7145	/*
7146	 * Push the zero'ed inode to its disk buffer so that we are free
7147	 * to delete its dependencies below. Once the dependencies are gone
7148	 * the buffer can be safely released.
7149	 */
7150	if (ump->um_fstype == UFS1) {
7151		dp1 = ((struct ufs1_dinode *)bp->b_data +
7152		    ino_to_fsbo(fs, ip->i_number));
7153		ip->i_din1->di_freelink = dp1->di_freelink;
7154		*dp1 = *ip->i_din1;
7155	} else {
7156		dp2 = ((struct ufs2_dinode *)bp->b_data +
7157		    ino_to_fsbo(fs, ip->i_number));
7158		ip->i_din2->di_freelink = dp2->di_freelink;
7159		ffs_update_dinode_ckhash(fs, ip->i_din2);
7160		*dp2 = *ip->i_din2;
7161	}
7162	/*
7163	 * Find and eliminate any inode dependencies.
7164	 */
7165	ACQUIRE_LOCK(ump);
7166	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
7167	if ((inodedep->id_state & IOSTARTED) != 0)
7168		panic("softdep_setup_freeblocks: inode busy");
7169	/*
7170	 * Add the freeblks structure to the list of operations that
7171	 * must await the zero'ed inode being written to disk. If we
7172	 * still have a bitmap dependency (delay == 0), then the inode
7173	 * has never been written to disk, so we can process the
7174	 * freeblks below once we have deleted the dependencies.
7175	 */
7176	delay = (inodedep->id_state & DEPCOMPLETE);
7177	if (delay)
7178		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
7179	else
7180		freeblks->fb_state |= COMPLETE;
7181	/*
7182	 * Because the file length has been truncated to zero, any
7183	 * pending block allocation dependency structures associated
7184	 * with this inode are obsolete and can simply be de-allocated.
7185	 * We must first merge the two dependency lists to get rid of
7186	 * any duplicate freefrag structures, then purge the merged list.
7187	 * If we still have a bitmap dependency, then the inode has never
7188	 * been written to disk, so we can free any fragments without delay.
7189	 */
7190	if (flags & IO_NORMAL) {
7191		merge_inode_lists(&inodedep->id_newinoupdt,
7192		    &inodedep->id_inoupdt);
7193		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
7194			cancel_allocdirect(&inodedep->id_inoupdt, adp,
7195			    freeblks);
7196	}
7197	if (flags & IO_EXT) {
7198		merge_inode_lists(&inodedep->id_newextupdt,
7199		    &inodedep->id_extupdt);
7200		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
7201			cancel_allocdirect(&inodedep->id_extupdt, adp,
7202			    freeblks);
7203	}
7204	FREE_LOCK(ump);
7205	bdwrite(bp);
7206	trunc_dependencies(ip, freeblks, -1, 0, flags);
7207	ACQUIRE_LOCK(ump);
7208	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
7209		(void) free_inodedep(inodedep);
7210	freeblks->fb_state |= DEPCOMPLETE;
7211	/*
7212	 * If the inode with zeroed block pointers is now on disk
7213	 * we can start freeing blocks.
7214	 */
7215	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
7216		freeblks->fb_state |= INPROGRESS;
7217	else
7218		freeblks = NULL;
7219	FREE_LOCK(ump);
7220	if (freeblks)
7221		handle_workitem_freeblocks(freeblks, 0);
7222	trunc_pages(ip, length, extblocks, flags);
7223}
7224
7225/*
7226 * Eliminate pages from the page cache that back parts of this inode and
7227 * adjust the vnode pager's idea of our size.  This prevents stale data
7228 * from hanging around in the page cache.
7229 */
7230static void
7231trunc_pages(
7232	struct inode *ip,
7233	off_t length,
7234	ufs2_daddr_t extblocks,
7235	int flags)
7236{
7237	struct vnode *vp;
7238	struct fs *fs;
7239	ufs_lbn_t lbn;
7240	off_t end, extend;
7241
7242	vp = ITOV(ip);
7243	fs = ITOFS(ip);
7244	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
7245	if ((flags & IO_EXT) != 0)
7246		vn_pages_remove(vp, extend, 0);
7247	if ((flags & IO_NORMAL) == 0)
7248		return;
7249	BO_LOCK(&vp->v_bufobj);
7250	drain_output(vp);
7251	BO_UNLOCK(&vp->v_bufobj);
7252	/*
7253	 * The vnode pager eliminates file pages we eliminate indirects
7254	 * below.
7255	 */
7256	vnode_pager_setsize(vp, length);
7257	/*
7258	 * Calculate the end based on the last indirect we want to keep.  If
7259	 * the block extends into indirects we can just use the negative of
7260	 * its lbn.  Doubles and triples exist at lower numbers so we must
7261	 * be careful not to remove those, if they exist.  double and triple
7262	 * indirect lbns do not overlap with others so it is not important
7263	 * to verify how many levels are required.
7264	 */
7265	lbn = lblkno(fs, length);
7266	if (lbn >= UFS_NDADDR) {
7267		/* Calculate the virtual lbn of the triple indirect. */
7268		lbn = -lbn - (UFS_NIADDR - 1);
7269		end = OFF_TO_IDX(lblktosize(fs, lbn));
7270	} else
7271		end = extend;
7272	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
7273}
7274
7275/*
7276 * See if the buf bp is in the range eliminated by truncation.
7277 */
7278static int
7279trunc_check_buf(
7280	struct buf *bp,
7281	int *blkoffp,
7282	ufs_lbn_t lastlbn,
7283	int lastoff,
7284	int flags)
7285{
7286	ufs_lbn_t lbn;
7287
7288	*blkoffp = 0;
7289	/* Only match ext/normal blocks as appropriate. */
7290	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
7291	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
7292		return (0);
7293	/* ALTDATA is always a full truncation. */
7294	if ((bp->b_xflags & BX_ALTDATA) != 0)
7295		return (1);
7296	/* -1 is full truncation. */
7297	if (lastlbn == -1)
7298		return (1);
7299	/*
7300	 * If this is a partial truncate we only want those
7301	 * blocks and indirect blocks that cover the range
7302	 * we're after.
7303	 */
7304	lbn = bp->b_lblkno;
7305	if (lbn < 0)
7306		lbn = -(lbn + lbn_level(lbn));
7307	if (lbn < lastlbn)
7308		return (0);
7309	/* Here we only truncate lblkno if it's partial. */
7310	if (lbn == lastlbn) {
7311		if (lastoff == 0)
7312			return (0);
7313		*blkoffp = lastoff;
7314	}
7315	return (1);
7316}
7317
7318/*
7319 * Eliminate any dependencies that exist in memory beyond lblkno:off
7320 */
7321static void
7322trunc_dependencies(
7323	struct inode *ip,
7324	struct freeblks *freeblks,
7325	ufs_lbn_t lastlbn,
7326	int lastoff,
7327	int flags)
7328{
7329	struct bufobj *bo;
7330	struct vnode *vp;
7331	struct buf *bp;
7332	int blkoff;
7333
7334	/*
7335	 * We must wait for any I/O in progress to finish so that
7336	 * all potential buffers on the dirty list will be visible.
7337	 * Once they are all there, walk the list and get rid of
7338	 * any dependencies.
7339	 */
7340	vp = ITOV(ip);
7341	bo = &vp->v_bufobj;
7342	BO_LOCK(bo);
7343	drain_output(vp);
7344	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
7345		bp->b_vflags &= ~BV_SCANNED;
7346restart:
7347	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
7348		if (bp->b_vflags & BV_SCANNED)
7349			continue;
7350		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7351			bp->b_vflags |= BV_SCANNED;
7352			continue;
7353		}
7354		KASSERT(bp->b_bufobj == bo, ("Wrong object in buffer"));
7355		if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
7356			goto restart;
7357		BO_UNLOCK(bo);
7358		if (deallocate_dependencies(bp, freeblks, blkoff))
7359			bqrelse(bp);
7360		else
7361			brelse(bp);
7362		BO_LOCK(bo);
7363		goto restart;
7364	}
7365	/*
7366	 * Now do the work of vtruncbuf while also matching indirect blocks.
7367	 */
7368	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
7369		bp->b_vflags &= ~BV_SCANNED;
7370cleanrestart:
7371	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
7372		if (bp->b_vflags & BV_SCANNED)
7373			continue;
7374		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
7375			bp->b_vflags |= BV_SCANNED;
7376			continue;
7377		}
7378		if (BUF_LOCK(bp,
7379		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
7380		    BO_LOCKPTR(bo)) == ENOLCK) {
7381			BO_LOCK(bo);
7382			goto cleanrestart;
7383		}
7384		BO_LOCK(bo);
7385		bp->b_vflags |= BV_SCANNED;
7386		BO_UNLOCK(bo);
7387		bremfree(bp);
7388		if (blkoff != 0) {
7389			allocbuf(bp, blkoff);
7390			bqrelse(bp);
7391		} else {
7392			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
7393			brelse(bp);
7394		}
7395		BO_LOCK(bo);
7396		goto cleanrestart;
7397	}
7398	drain_output(vp);
7399	BO_UNLOCK(bo);
7400}
7401
7402static int
7403cancel_pagedep(
7404	struct pagedep *pagedep,
7405	struct freeblks *freeblks,
7406	int blkoff)
7407{
7408	struct jremref *jremref;
7409	struct jmvref *jmvref;
7410	struct dirrem *dirrem, *tmp;
7411	int i;
7412
7413	/*
7414	 * Copy any directory remove dependencies to the list
7415	 * to be processed after the freeblks proceeds.  If
7416	 * directory entry never made it to disk they
7417	 * can be dumped directly onto the work list.
7418	 */
7419	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
7420		/* Skip this directory removal if it is intended to remain. */
7421		if (dirrem->dm_offset < blkoff)
7422			continue;
7423		/*
7424		 * If there are any dirrems we wait for the journal write
7425		 * to complete and then restart the buf scan as the lock
7426		 * has been dropped.
7427		 */
7428		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
7429			jwait(&jremref->jr_list, MNT_WAIT);
7430			return (ERESTART);
7431		}
7432		LIST_REMOVE(dirrem, dm_next);
7433		dirrem->dm_dirinum = pagedep->pd_ino;
7434		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
7435	}
7436	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
7437		jwait(&jmvref->jm_list, MNT_WAIT);
7438		return (ERESTART);
7439	}
7440	/*
7441	 * When we're partially truncating a pagedep we just want to flush
7442	 * journal entries and return.  There can not be any adds in the
7443	 * truncated portion of the directory and newblk must remain if
7444	 * part of the block remains.
7445	 */
7446	if (blkoff != 0) {
7447		struct diradd *dap;
7448
7449		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7450			if (dap->da_offset > blkoff)
7451				panic("cancel_pagedep: diradd %p off %d > %d",
7452				    dap, dap->da_offset, blkoff);
7453		for (i = 0; i < DAHASHSZ; i++)
7454			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7455				if (dap->da_offset > blkoff)
7456					panic("cancel_pagedep: diradd %p off %d > %d",
7457					    dap, dap->da_offset, blkoff);
7458		return (0);
7459	}
7460	/*
7461	 * There should be no directory add dependencies present
7462	 * as the directory could not be truncated until all
7463	 * children were removed.
7464	 */
7465	KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
7466	    ("deallocate_dependencies: pendinghd != NULL"));
7467	for (i = 0; i < DAHASHSZ; i++)
7468		KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7469		    ("deallocate_dependencies: diraddhd != NULL"));
7470	if ((pagedep->pd_state & NEWBLOCK) != 0)
7471		free_newdirblk(pagedep->pd_newdirblk);
7472	if (free_pagedep(pagedep) == 0)
7473		panic("Failed to free pagedep %p", pagedep);
7474	return (0);
7475}
7476
7477/*
7478 * Reclaim any dependency structures from a buffer that is about to
7479 * be reallocated to a new vnode. The buffer must be locked, thus,
7480 * no I/O completion operations can occur while we are manipulating
7481 * its associated dependencies. The mutex is held so that other I/O's
7482 * associated with related dependencies do not occur.
7483 */
7484static int
7485deallocate_dependencies(
7486	struct buf *bp,
7487	struct freeblks *freeblks,
7488	int off)
7489{
7490	struct indirdep *indirdep;
7491	struct pagedep *pagedep;
7492	struct worklist *wk, *wkn;
7493	struct ufsmount *ump;
7494
7495	ump = softdep_bp_to_mp(bp);
7496	if (ump == NULL)
7497		goto done;
7498	ACQUIRE_LOCK(ump);
7499	LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7500		switch (wk->wk_type) {
7501		case D_INDIRDEP:
7502			indirdep = WK_INDIRDEP(wk);
7503			if (bp->b_lblkno >= 0 ||
7504			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7505				panic("deallocate_dependencies: not indir");
7506			cancel_indirdep(indirdep, bp, freeblks);
7507			continue;
7508
7509		case D_PAGEDEP:
7510			pagedep = WK_PAGEDEP(wk);
7511			if (cancel_pagedep(pagedep, freeblks, off)) {
7512				FREE_LOCK(ump);
7513				return (ERESTART);
7514			}
7515			continue;
7516
7517		case D_ALLOCINDIR:
7518			/*
7519			 * Simply remove the allocindir, we'll find it via
7520			 * the indirdep where we can clear pointers if
7521			 * needed.
7522			 */
7523			WORKLIST_REMOVE(wk);
7524			continue;
7525
7526		case D_FREEWORK:
7527			/*
7528			 * A truncation is waiting for the zero'd pointers
7529			 * to be written.  It can be freed when the freeblks
7530			 * is journaled.
7531			 */
7532			WORKLIST_REMOVE(wk);
7533			wk->wk_state |= ONDEPLIST;
7534			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7535			break;
7536
7537		case D_ALLOCDIRECT:
7538			if (off != 0)
7539				continue;
7540			/* FALLTHROUGH */
7541		default:
7542			panic("deallocate_dependencies: Unexpected type %s",
7543			    TYPENAME(wk->wk_type));
7544			/* NOTREACHED */
7545		}
7546	}
7547	FREE_LOCK(ump);
7548done:
7549	/*
7550	 * Don't throw away this buf, we were partially truncating and
7551	 * some deps may always remain.
7552	 */
7553	if (off) {
7554		allocbuf(bp, off);
7555		bp->b_vflags |= BV_SCANNED;
7556		return (EBUSY);
7557	}
7558	bp->b_flags |= B_INVAL | B_NOCACHE;
7559
7560	return (0);
7561}
7562
7563/*
7564 * An allocdirect is being canceled due to a truncate.  We must make sure
7565 * the journal entry is released in concert with the blkfree that releases
7566 * the storage.  Completed journal entries must not be released until the
7567 * space is no longer pointed to by the inode or in the bitmap.
7568 */
7569static void
7570cancel_allocdirect(
7571	struct allocdirectlst *adphead,
7572	struct allocdirect *adp,
7573	struct freeblks *freeblks)
7574{
7575	struct freework *freework;
7576	struct newblk *newblk;
7577	struct worklist *wk;
7578
7579	TAILQ_REMOVE(adphead, adp, ad_next);
7580	newblk = (struct newblk *)adp;
7581	freework = NULL;
7582	/*
7583	 * Find the correct freework structure.
7584	 */
7585	LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7586		if (wk->wk_type != D_FREEWORK)
7587			continue;
7588		freework = WK_FREEWORK(wk);
7589		if (freework->fw_blkno == newblk->nb_newblkno)
7590			break;
7591	}
7592	if (freework == NULL)
7593		panic("cancel_allocdirect: Freework not found");
7594	/*
7595	 * If a newblk exists at all we still have the journal entry that
7596	 * initiated the allocation so we do not need to journal the free.
7597	 */
7598	cancel_jfreeblk(freeblks, freework->fw_blkno);
7599	/*
7600	 * If the journal hasn't been written the jnewblk must be passed
7601	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
7602	 * this by linking the journal dependency into the freework to be
7603	 * freed when freework_freeblock() is called.  If the journal has
7604	 * been written we can simply reclaim the journal space when the
7605	 * freeblks work is complete.
7606	 */
7607	freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7608	    &freeblks->fb_jwork);
7609	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7610}
7611
7612/*
7613 * Cancel a new block allocation.  May be an indirect or direct block.  We
7614 * remove it from various lists and return any journal record that needs to
7615 * be resolved by the caller.
7616 *
7617 * A special consideration is made for indirects which were never pointed
7618 * at on disk and will never be found once this block is released.
7619 */
7620static struct jnewblk *
7621cancel_newblk(
7622	struct newblk *newblk,
7623	struct worklist *wk,
7624	struct workhead *wkhd)
7625{
7626	struct jnewblk *jnewblk;
7627
7628	CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7629
7630	newblk->nb_state |= GOINGAWAY;
7631	/*
7632	 * Previously we traversed the completedhd on each indirdep
7633	 * attached to this newblk to cancel them and gather journal
7634	 * work.  Since we need only the oldest journal segment and
7635	 * the lowest point on the tree will always have the oldest
7636	 * journal segment we are free to release the segments
7637	 * of any subordinates and may leave the indirdep list to
7638	 * indirdep_complete() when this newblk is freed.
7639	 */
7640	if (newblk->nb_state & ONDEPLIST) {
7641		newblk->nb_state &= ~ONDEPLIST;
7642		LIST_REMOVE(newblk, nb_deps);
7643	}
7644	if (newblk->nb_state & ONWORKLIST)
7645		WORKLIST_REMOVE(&newblk->nb_list);
7646	/*
7647	 * If the journal entry hasn't been written we save a pointer to
7648	 * the dependency that frees it until it is written or the
7649	 * superseding operation completes.
7650	 */
7651	jnewblk = newblk->nb_jnewblk;
7652	if (jnewblk != NULL && wk != NULL) {
7653		newblk->nb_jnewblk = NULL;
7654		jnewblk->jn_dep = wk;
7655	}
7656	if (!LIST_EMPTY(&newblk->nb_jwork))
7657		jwork_move(wkhd, &newblk->nb_jwork);
7658	/*
7659	 * When truncating we must free the newdirblk early to remove
7660	 * the pagedep from the hash before returning.
7661	 */
7662	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7663		free_newdirblk(WK_NEWDIRBLK(wk));
7664	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7665		panic("cancel_newblk: extra newdirblk");
7666
7667	return (jnewblk);
7668}
7669
7670/*
7671 * Schedule the freefrag associated with a newblk to be released once
7672 * the pointers are written and the previous block is no longer needed.
7673 */
7674static void
7675newblk_freefrag(struct newblk *newblk)
7676{
7677	struct freefrag *freefrag;
7678
7679	if (newblk->nb_freefrag == NULL)
7680		return;
7681	freefrag = newblk->nb_freefrag;
7682	newblk->nb_freefrag = NULL;
7683	freefrag->ff_state |= COMPLETE;
7684	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7685		add_to_worklist(&freefrag->ff_list, 0);
7686}
7687
7688/*
7689 * Free a newblk. Generate a new freefrag work request if appropriate.
7690 * This must be called after the inode pointer and any direct block pointers
7691 * are valid or fully removed via truncate or frag extension.
7692 */
7693static void
7694free_newblk(struct newblk *newblk)
7695{
7696	struct indirdep *indirdep;
7697	struct worklist *wk;
7698
7699	KASSERT(newblk->nb_jnewblk == NULL,
7700	    ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7701	KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7702	    ("free_newblk: unclaimed newblk"));
7703	LOCK_OWNED(VFSTOUFS(newblk->nb_list.wk_mp));
7704	newblk_freefrag(newblk);
7705	if (newblk->nb_state & ONDEPLIST)
7706		LIST_REMOVE(newblk, nb_deps);
7707	if (newblk->nb_state & ONWORKLIST)
7708		WORKLIST_REMOVE(&newblk->nb_list);
7709	LIST_REMOVE(newblk, nb_hash);
7710	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7711		free_newdirblk(WK_NEWDIRBLK(wk));
7712	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7713		panic("free_newblk: extra newdirblk");
7714	while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7715		indirdep_complete(indirdep);
7716	handle_jwork(&newblk->nb_jwork);
7717	WORKITEM_FREE(newblk, D_NEWBLK);
7718}
7719
7720/*
7721 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7722 */
7723static void
7724free_newdirblk(struct newdirblk *newdirblk)
7725{
7726	struct pagedep *pagedep;
7727	struct diradd *dap;
7728	struct worklist *wk;
7729
7730	LOCK_OWNED(VFSTOUFS(newdirblk->db_list.wk_mp));
7731	WORKLIST_REMOVE(&newdirblk->db_list);
7732	/*
7733	 * If the pagedep is still linked onto the directory buffer
7734	 * dependency chain, then some of the entries on the
7735	 * pd_pendinghd list may not be committed to disk yet. In
7736	 * this case, we will simply clear the NEWBLOCK flag and
7737	 * let the pd_pendinghd list be processed when the pagedep
7738	 * is next written. If the pagedep is no longer on the buffer
7739	 * dependency chain, then all the entries on the pd_pending
7740	 * list are committed to disk and we can free them here.
7741	 */
7742	pagedep = newdirblk->db_pagedep;
7743	pagedep->pd_state &= ~NEWBLOCK;
7744	if ((pagedep->pd_state & ONWORKLIST) == 0) {
7745		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7746			free_diradd(dap, NULL);
7747		/*
7748		 * If no dependencies remain, the pagedep will be freed.
7749		 */
7750		free_pagedep(pagedep);
7751	}
7752	/* Should only ever be one item in the list. */
7753	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7754		WORKLIST_REMOVE(wk);
7755		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7756	}
7757	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7758}
7759
7760/*
7761 * Prepare an inode to be freed. The actual free operation is not
7762 * done until the zero'ed inode has been written to disk.
7763 */
7764void
7765softdep_freefile(
7766	struct vnode *pvp,
7767	ino_t ino,
7768	int mode)
7769{
7770	struct inode *ip = VTOI(pvp);
7771	struct inodedep *inodedep;
7772	struct freefile *freefile;
7773	struct freeblks *freeblks;
7774	struct ufsmount *ump;
7775
7776	ump = ITOUMP(ip);
7777	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7778	    ("softdep_freefile called on non-softdep filesystem"));
7779	/*
7780	 * This sets up the inode de-allocation dependency.
7781	 */
7782	freefile = malloc(sizeof(struct freefile),
7783		M_FREEFILE, M_SOFTDEP_FLAGS);
7784	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7785	freefile->fx_mode = mode;
7786	freefile->fx_oldinum = ino;
7787	freefile->fx_devvp = ump->um_devvp;
7788	LIST_INIT(&freefile->fx_jwork);
7789	UFS_LOCK(ump);
7790	ump->um_fs->fs_pendinginodes += 1;
7791	UFS_UNLOCK(ump);
7792
7793	/*
7794	 * If the inodedep does not exist, then the zero'ed inode has
7795	 * been written to disk. If the allocated inode has never been
7796	 * written to disk, then the on-disk inode is zero'ed. In either
7797	 * case we can free the file immediately.  If the journal was
7798	 * canceled before being written the inode will never make it to
7799	 * disk and we must send the canceled journal entrys to
7800	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7801	 * Any blocks waiting on the inode to write can be safely freed
7802	 * here as it will never been written.
7803	 */
7804	ACQUIRE_LOCK(ump);
7805	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7806	if (inodedep) {
7807		/*
7808		 * Clear out freeblks that no longer need to reference
7809		 * this inode.
7810		 */
7811		while ((freeblks =
7812		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7813			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7814			    fb_next);
7815			freeblks->fb_state &= ~ONDEPLIST;
7816		}
7817		/*
7818		 * Remove this inode from the unlinked list.
7819		 */
7820		if (inodedep->id_state & UNLINKED) {
7821			/*
7822			 * Save the journal work to be freed with the bitmap
7823			 * before we clear UNLINKED.  Otherwise it can be lost
7824			 * if the inode block is written.
7825			 */
7826			handle_bufwait(inodedep, &freefile->fx_jwork);
7827			clear_unlinked_inodedep(inodedep);
7828			/*
7829			 * Re-acquire inodedep as we've dropped the
7830			 * per-filesystem lock in clear_unlinked_inodedep().
7831			 */
7832			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7833		}
7834	}
7835	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7836		FREE_LOCK(ump);
7837		handle_workitem_freefile(freefile);
7838		return;
7839	}
7840	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7841		inodedep->id_state |= GOINGAWAY;
7842	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7843	FREE_LOCK(ump);
7844	if (ip->i_number == ino)
7845		UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
7846}
7847
7848/*
7849 * Check to see if an inode has never been written to disk. If
7850 * so free the inodedep and return success, otherwise return failure.
7851 *
7852 * If we still have a bitmap dependency, then the inode has never
7853 * been written to disk. Drop the dependency as it is no longer
7854 * necessary since the inode is being deallocated. We set the
7855 * ALLCOMPLETE flags since the bitmap now properly shows that the
7856 * inode is not allocated. Even if the inode is actively being
7857 * written, it has been rolled back to its zero'ed state, so we
7858 * are ensured that a zero inode is what is on the disk. For short
7859 * lived files, this change will usually result in removing all the
7860 * dependencies from the inode so that it can be freed immediately.
7861 */
7862static int
7863check_inode_unwritten(struct inodedep *inodedep)
7864{
7865
7866	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7867
7868	if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7869	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7870	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7871	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7872	    !LIST_EMPTY(&inodedep->id_inowait) ||
7873	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7874	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7875	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7876	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7877	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7878	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7879	    inodedep->id_mkdiradd != NULL ||
7880	    inodedep->id_nlinkdelta != 0)
7881		return (0);
7882	/*
7883	 * Another process might be in initiate_write_inodeblock_ufs[12]
7884	 * trying to allocate memory without holding "Softdep Lock".
7885	 */
7886	if ((inodedep->id_state & IOSTARTED) != 0 &&
7887	    inodedep->id_savedino1 == NULL)
7888		return (0);
7889
7890	if (inodedep->id_state & ONDEPLIST)
7891		LIST_REMOVE(inodedep, id_deps);
7892	inodedep->id_state &= ~ONDEPLIST;
7893	inodedep->id_state |= ALLCOMPLETE;
7894	inodedep->id_bmsafemap = NULL;
7895	if (inodedep->id_state & ONWORKLIST)
7896		WORKLIST_REMOVE(&inodedep->id_list);
7897	if (inodedep->id_savedino1 != NULL) {
7898		free(inodedep->id_savedino1, M_SAVEDINO);
7899		inodedep->id_savedino1 = NULL;
7900	}
7901	if (free_inodedep(inodedep) == 0)
7902		panic("check_inode_unwritten: busy inode");
7903	return (1);
7904}
7905
7906static int
7907check_inodedep_free(struct inodedep *inodedep)
7908{
7909
7910	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7911	if ((inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7912	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7913	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7914	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7915	    !LIST_EMPTY(&inodedep->id_inowait) ||
7916	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7917	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7918	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7919	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7920	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7921	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7922	    inodedep->id_mkdiradd != NULL ||
7923	    inodedep->id_nlinkdelta != 0 ||
7924	    inodedep->id_savedino1 != NULL)
7925		return (0);
7926	return (1);
7927}
7928
7929/*
7930 * Try to free an inodedep structure. Return 1 if it could be freed.
7931 */
7932static int
7933free_inodedep(struct inodedep *inodedep)
7934{
7935
7936	LOCK_OWNED(VFSTOUFS(inodedep->id_list.wk_mp));
7937	if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7938	    !check_inodedep_free(inodedep))
7939		return (0);
7940	if (inodedep->id_state & ONDEPLIST)
7941		LIST_REMOVE(inodedep, id_deps);
7942	LIST_REMOVE(inodedep, id_hash);
7943	WORKITEM_FREE(inodedep, D_INODEDEP);
7944	return (1);
7945}
7946
7947/*
7948 * Free the block referenced by a freework structure.  The parent freeblks
7949 * structure is released and completed when the final cg bitmap reaches
7950 * the disk.  This routine may be freeing a jnewblk which never made it to
7951 * disk in which case we do not have to wait as the operation is undone
7952 * in memory immediately.
7953 */
7954static void
7955freework_freeblock(struct freework *freework, uint64_t key)
7956{
7957	struct freeblks *freeblks;
7958	struct jnewblk *jnewblk;
7959	struct ufsmount *ump;
7960	struct workhead wkhd;
7961	struct fs *fs;
7962	int bsize;
7963	int needj;
7964
7965	ump = VFSTOUFS(freework->fw_list.wk_mp);
7966	LOCK_OWNED(ump);
7967	/*
7968	 * Handle partial truncate separately.
7969	 */
7970	if (freework->fw_indir) {
7971		complete_trunc_indir(freework);
7972		return;
7973	}
7974	freeblks = freework->fw_freeblks;
7975	fs = ump->um_fs;
7976	needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7977	bsize = lfragtosize(fs, freework->fw_frags);
7978	LIST_INIT(&wkhd);
7979	/*
7980	 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7981	 * on the indirblk hashtable and prevents premature freeing.
7982	 */
7983	freework->fw_state |= DEPCOMPLETE;
7984	/*
7985	 * SUJ needs to wait for the segment referencing freed indirect
7986	 * blocks to expire so that we know the checker will not confuse
7987	 * a re-allocated indirect block with its old contents.
7988	 */
7989	if (needj && freework->fw_lbn <= -UFS_NDADDR)
7990		indirblk_insert(freework);
7991	/*
7992	 * If we are canceling an existing jnewblk pass it to the free
7993	 * routine, otherwise pass the freeblk which will ultimately
7994	 * release the freeblks.  If we're not journaling, we can just
7995	 * free the freeblks immediately.
7996	 */
7997	jnewblk = freework->fw_jnewblk;
7998	if (jnewblk != NULL) {
7999		cancel_jnewblk(jnewblk, &wkhd);
8000		needj = 0;
8001	} else if (needj) {
8002		freework->fw_state |= DELAYEDFREE;
8003		freeblks->fb_cgwait++;
8004		WORKLIST_INSERT(&wkhd, &freework->fw_list);
8005	}
8006	FREE_LOCK(ump);
8007	freeblks_free(ump, freeblks, btodb(bsize));
8008	CTR4(KTR_SUJ,
8009	    "freework_freeblock: ino %jd blkno %jd lbn %jd size %d",
8010	    freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
8011	ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
8012	    freeblks->fb_inum, freeblks->fb_vtype, &wkhd, key);
8013	ACQUIRE_LOCK(ump);
8014	/*
8015	 * The jnewblk will be discarded and the bits in the map never
8016	 * made it to disk.  We can immediately free the freeblk.
8017	 */
8018	if (needj == 0)
8019		handle_written_freework(freework);
8020}
8021
8022/*
8023 * We enqueue freework items that need processing back on the freeblks and
8024 * add the freeblks to the worklist.  This makes it easier to find all work
8025 * required to flush a truncation in process_truncates().
8026 */
8027static void
8028freework_enqueue(struct freework *freework)
8029{
8030	struct freeblks *freeblks;
8031
8032	freeblks = freework->fw_freeblks;
8033	if ((freework->fw_state & INPROGRESS) == 0)
8034		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
8035	if ((freeblks->fb_state &
8036	    (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
8037	    LIST_EMPTY(&freeblks->fb_jblkdephd))
8038		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
8039}
8040
8041/*
8042 * Start, continue, or finish the process of freeing an indirect block tree.
8043 * The free operation may be paused at any point with fw_off containing the
8044 * offset to restart from.  This enables us to implement some flow control
8045 * for large truncates which may fan out and generate a huge number of
8046 * dependencies.
8047 */
8048static void
8049handle_workitem_indirblk(struct freework *freework)
8050{
8051	struct freeblks *freeblks;
8052	struct ufsmount *ump;
8053	struct fs *fs;
8054
8055	freeblks = freework->fw_freeblks;
8056	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8057	fs = ump->um_fs;
8058	if (freework->fw_state & DEPCOMPLETE) {
8059		handle_written_freework(freework);
8060		return;
8061	}
8062	if (freework->fw_off == NINDIR(fs)) {
8063		freework_freeblock(freework, SINGLETON_KEY);
8064		return;
8065	}
8066	freework->fw_state |= INPROGRESS;
8067	FREE_LOCK(ump);
8068	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
8069	    freework->fw_lbn);
8070	ACQUIRE_LOCK(ump);
8071}
8072
8073/*
8074 * Called when a freework structure attached to a cg buf is written.  The
8075 * ref on either the parent or the freeblks structure is released and
8076 * the freeblks is added back to the worklist if there is more work to do.
8077 */
8078static void
8079handle_written_freework(struct freework *freework)
8080{
8081	struct freeblks *freeblks;
8082	struct freework *parent;
8083
8084	freeblks = freework->fw_freeblks;
8085	parent = freework->fw_parent;
8086	if (freework->fw_state & DELAYEDFREE)
8087		freeblks->fb_cgwait--;
8088	freework->fw_state |= COMPLETE;
8089	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
8090		WORKITEM_FREE(freework, D_FREEWORK);
8091	if (parent) {
8092		if (--parent->fw_ref == 0)
8093			freework_enqueue(parent);
8094		return;
8095	}
8096	if (--freeblks->fb_ref != 0)
8097		return;
8098	if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
8099	    ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
8100		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
8101}
8102
8103/*
8104 * This workitem routine performs the block de-allocation.
8105 * The workitem is added to the pending list after the updated
8106 * inode block has been written to disk.  As mentioned above,
8107 * checks regarding the number of blocks de-allocated (compared
8108 * to the number of blocks allocated for the file) are also
8109 * performed in this function.
8110 */
8111static int
8112handle_workitem_freeblocks(struct freeblks *freeblks, int flags)
8113{
8114	struct freework *freework;
8115	struct newblk *newblk;
8116	struct allocindir *aip;
8117	struct ufsmount *ump;
8118	struct worklist *wk;
8119	uint64_t key;
8120
8121	KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
8122	    ("handle_workitem_freeblocks: Journal entries not written."));
8123	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8124	key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum);
8125	ACQUIRE_LOCK(ump);
8126	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
8127		WORKLIST_REMOVE(wk);
8128		switch (wk->wk_type) {
8129		case D_DIRREM:
8130			wk->wk_state |= COMPLETE;
8131			add_to_worklist(wk, 0);
8132			continue;
8133
8134		case D_ALLOCDIRECT:
8135			free_newblk(WK_NEWBLK(wk));
8136			continue;
8137
8138		case D_ALLOCINDIR:
8139			aip = WK_ALLOCINDIR(wk);
8140			freework = NULL;
8141			if (aip->ai_state & DELAYEDFREE) {
8142				FREE_LOCK(ump);
8143				freework = newfreework(ump, freeblks, NULL,
8144				    aip->ai_lbn, aip->ai_newblkno,
8145				    ump->um_fs->fs_frag, 0, 0);
8146				ACQUIRE_LOCK(ump);
8147			}
8148			newblk = WK_NEWBLK(wk);
8149			if (newblk->nb_jnewblk) {
8150				freework->fw_jnewblk = newblk->nb_jnewblk;
8151				newblk->nb_jnewblk->jn_dep = &freework->fw_list;
8152				newblk->nb_jnewblk = NULL;
8153			}
8154			free_newblk(newblk);
8155			continue;
8156
8157		case D_FREEWORK:
8158			freework = WK_FREEWORK(wk);
8159			if (freework->fw_lbn <= -UFS_NDADDR)
8160				handle_workitem_indirblk(freework);
8161			else
8162				freework_freeblock(freework, key);
8163			continue;
8164		default:
8165			panic("handle_workitem_freeblocks: Unknown type %s",
8166			    TYPENAME(wk->wk_type));
8167		}
8168	}
8169	if (freeblks->fb_ref != 0) {
8170		freeblks->fb_state &= ~INPROGRESS;
8171		wake_worklist(&freeblks->fb_list);
8172		freeblks = NULL;
8173	}
8174	FREE_LOCK(ump);
8175	ffs_blkrelease_finish(ump, key);
8176	if (freeblks)
8177		return handle_complete_freeblocks(freeblks, flags);
8178	return (0);
8179}
8180
8181/*
8182 * Handle completion of block free via truncate.  This allows fs_pending
8183 * to track the actual free block count more closely than if we only updated
8184 * it at the end.  We must be careful to handle cases where the block count
8185 * on free was incorrect.
8186 */
8187static void
8188freeblks_free(struct ufsmount *ump,
8189	struct freeblks *freeblks,
8190	int blocks)
8191{
8192	struct fs *fs;
8193	ufs2_daddr_t remain;
8194
8195	UFS_LOCK(ump);
8196	remain = -freeblks->fb_chkcnt;
8197	freeblks->fb_chkcnt += blocks;
8198	if (remain > 0) {
8199		if (remain < blocks)
8200			blocks = remain;
8201		fs = ump->um_fs;
8202		fs->fs_pendingblocks -= blocks;
8203	}
8204	UFS_UNLOCK(ump);
8205}
8206
8207/*
8208 * Once all of the freework workitems are complete we can retire the
8209 * freeblocks dependency and any journal work awaiting completion.  This
8210 * can not be called until all other dependencies are stable on disk.
8211 */
8212static int
8213handle_complete_freeblocks(struct freeblks *freeblks, int flags)
8214{
8215	struct inodedep *inodedep;
8216	struct inode *ip;
8217	struct vnode *vp;
8218	struct fs *fs;
8219	struct ufsmount *ump;
8220	ufs2_daddr_t spare;
8221
8222	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
8223	fs = ump->um_fs;
8224	flags = LK_EXCLUSIVE | flags;
8225	spare = freeblks->fb_chkcnt;
8226
8227	/*
8228	 * If we did not release the expected number of blocks we may have
8229	 * to adjust the inode block count here.  Only do so if it wasn't
8230	 * a truncation to zero and the modrev still matches.
8231	 */
8232	if (spare && freeblks->fb_len != 0) {
8233		if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
8234		    flags, &vp, FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP) != 0)
8235			return (EBUSY);
8236		ip = VTOI(vp);
8237		if (ip->i_mode == 0) {
8238			vgone(vp);
8239		} else if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
8240			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
8241			UFS_INODE_SET_FLAG(ip, IN_CHANGE);
8242			/*
8243			 * We must wait so this happens before the
8244			 * journal is reclaimed.
8245			 */
8246			ffs_update(vp, 1);
8247		}
8248		vput(vp);
8249	}
8250	if (spare < 0) {
8251		UFS_LOCK(ump);
8252		fs->fs_pendingblocks += spare;
8253		UFS_UNLOCK(ump);
8254	}
8255#ifdef QUOTA
8256	/* Handle spare. */
8257	if (spare)
8258		quotaadj(freeblks->fb_quota, ump, -spare);
8259	quotarele(freeblks->fb_quota);
8260#endif
8261	ACQUIRE_LOCK(ump);
8262	if (freeblks->fb_state & ONDEPLIST) {
8263		inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
8264		    0, &inodedep);
8265		TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
8266		freeblks->fb_state &= ~ONDEPLIST;
8267		if (TAILQ_EMPTY(&inodedep->id_freeblklst))
8268			free_inodedep(inodedep);
8269	}
8270	/*
8271	 * All of the freeblock deps must be complete prior to this call
8272	 * so it's now safe to complete earlier outstanding journal entries.
8273	 */
8274	handle_jwork(&freeblks->fb_jwork);
8275	WORKITEM_FREE(freeblks, D_FREEBLKS);
8276	FREE_LOCK(ump);
8277	return (0);
8278}
8279
8280/*
8281 * Release blocks associated with the freeblks and stored in the indirect
8282 * block dbn. If level is greater than SINGLE, the block is an indirect block
8283 * and recursive calls to indirtrunc must be used to cleanse other indirect
8284 * blocks.
8285 *
8286 * This handles partial and complete truncation of blocks.  Partial is noted
8287 * with goingaway == 0.  In this case the freework is completed after the
8288 * zero'd indirects are written to disk.  For full truncation the freework
8289 * is completed after the block is freed.
8290 */
8291static void
8292indir_trunc(struct freework *freework,
8293	ufs2_daddr_t dbn,
8294	ufs_lbn_t lbn)
8295{
8296	struct freework *nfreework;
8297	struct workhead wkhd;
8298	struct freeblks *freeblks;
8299	struct buf *bp;
8300	struct fs *fs;
8301	struct indirdep *indirdep;
8302	struct mount *mp;
8303	struct ufsmount *ump;
8304	ufs1_daddr_t *bap1;
8305	ufs2_daddr_t nb, nnb, *bap2;
8306	ufs_lbn_t lbnadd, nlbn;
8307	uint64_t key;
8308	int nblocks, ufs1fmt, freedblocks;
8309	int goingaway, freedeps, needj, level, cnt, i, error;
8310
8311	freeblks = freework->fw_freeblks;
8312	mp = freeblks->fb_list.wk_mp;
8313	ump = VFSTOUFS(mp);
8314	fs = ump->um_fs;
8315	/*
8316	 * Get buffer of block pointers to be freed.  There are three cases:
8317	 *
8318	 * 1) Partial truncate caches the indirdep pointer in the freework
8319	 *    which provides us a back copy to the save bp which holds the
8320	 *    pointers we want to clear.  When this completes the zero
8321	 *    pointers are written to the real copy.
8322	 * 2) The indirect is being completely truncated, cancel_indirdep()
8323	 *    eliminated the real copy and placed the indirdep on the saved
8324	 *    copy.  The indirdep and buf are discarded when this completes.
8325	 * 3) The indirect was not in memory, we read a copy off of the disk
8326	 *    using the devvp and drop and invalidate the buffer when we're
8327	 *    done.
8328	 */
8329	goingaway = 1;
8330	indirdep = NULL;
8331	if (freework->fw_indir != NULL) {
8332		goingaway = 0;
8333		indirdep = freework->fw_indir;
8334		bp = indirdep->ir_savebp;
8335		if (bp == NULL || bp->b_blkno != dbn)
8336			panic("indir_trunc: Bad saved buf %p blkno %jd",
8337			    bp, (intmax_t)dbn);
8338	} else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
8339		/*
8340		 * The lock prevents the buf dep list from changing and
8341	 	 * indirects on devvp should only ever have one dependency.
8342		 */
8343		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
8344		if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
8345			panic("indir_trunc: Bad indirdep %p from buf %p",
8346			    indirdep, bp);
8347	} else {
8348		error = ffs_breadz(ump, freeblks->fb_devvp, dbn, dbn,
8349		    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL, &bp);
8350		if (error)
8351			return;
8352	}
8353	ACQUIRE_LOCK(ump);
8354	/* Protects against a race with complete_trunc_indir(). */
8355	freework->fw_state &= ~INPROGRESS;
8356	/*
8357	 * If we have an indirdep we need to enforce the truncation order
8358	 * and discard it when it is complete.
8359	 */
8360	if (indirdep) {
8361		if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
8362		    !TAILQ_EMPTY(&indirdep->ir_trunc)) {
8363			/*
8364			 * Add the complete truncate to the list on the
8365			 * indirdep to enforce in-order processing.
8366			 */
8367			if (freework->fw_indir == NULL)
8368				TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
8369				    freework, fw_next);
8370			FREE_LOCK(ump);
8371			return;
8372		}
8373		/*
8374		 * If we're goingaway, free the indirdep.  Otherwise it will
8375		 * linger until the write completes.
8376		 */
8377		if (goingaway) {
8378			KASSERT(indirdep->ir_savebp == bp,
8379			    ("indir_trunc: losing ir_savebp %p",
8380			    indirdep->ir_savebp));
8381			indirdep->ir_savebp = NULL;
8382			free_indirdep(indirdep);
8383		}
8384	}
8385	FREE_LOCK(ump);
8386	/* Initialize pointers depending on block size. */
8387	if (ump->um_fstype == UFS1) {
8388		bap1 = (ufs1_daddr_t *)bp->b_data;
8389		nb = bap1[freework->fw_off];
8390		ufs1fmt = 1;
8391		bap2 = NULL;
8392	} else {
8393		bap2 = (ufs2_daddr_t *)bp->b_data;
8394		nb = bap2[freework->fw_off];
8395		ufs1fmt = 0;
8396		bap1 = NULL;
8397	}
8398	level = lbn_level(lbn);
8399	needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
8400	lbnadd = lbn_offset(fs, level);
8401	nblocks = btodb(fs->fs_bsize);
8402	nfreework = freework;
8403	freedeps = 0;
8404	cnt = 0;
8405	/*
8406	 * Reclaim blocks.  Traverses into nested indirect levels and
8407	 * arranges for the current level to be freed when subordinates
8408	 * are free when journaling.
8409	 */
8410	key = ffs_blkrelease_start(ump, freeblks->fb_devvp, freeblks->fb_inum);
8411	for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
8412		if (UFS_CHECK_BLKNO(mp, freeblks->fb_inum, nb,
8413		    fs->fs_bsize) != 0)
8414			nb = 0;
8415		if (i != NINDIR(fs) - 1) {
8416			if (ufs1fmt)
8417				nnb = bap1[i+1];
8418			else
8419				nnb = bap2[i+1];
8420		} else
8421			nnb = 0;
8422		if (nb == 0)
8423			continue;
8424		cnt++;
8425		if (level != 0) {
8426			nlbn = (lbn + 1) - (i * lbnadd);
8427			if (needj != 0) {
8428				nfreework = newfreework(ump, freeblks, freework,
8429				    nlbn, nb, fs->fs_frag, 0, 0);
8430				freedeps++;
8431			}
8432			indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
8433		} else {
8434			struct freedep *freedep;
8435
8436			/*
8437			 * Attempt to aggregate freedep dependencies for
8438			 * all blocks being released to the same CG.
8439			 */
8440			LIST_INIT(&wkhd);
8441			if (needj != 0 &&
8442			    (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
8443				freedep = newfreedep(freework);
8444				WORKLIST_INSERT_UNLOCKED(&wkhd,
8445				    &freedep->fd_list);
8446				freedeps++;
8447			}
8448			CTR3(KTR_SUJ,
8449			    "indir_trunc: ino %jd blkno %jd size %d",
8450			    freeblks->fb_inum, nb, fs->fs_bsize);
8451			ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
8452			    fs->fs_bsize, freeblks->fb_inum,
8453			    freeblks->fb_vtype, &wkhd, key);
8454		}
8455	}
8456	ffs_blkrelease_finish(ump, key);
8457	if (goingaway) {
8458		bp->b_flags |= B_INVAL | B_NOCACHE;
8459		brelse(bp);
8460	}
8461	freedblocks = 0;
8462	if (level == 0)
8463		freedblocks = (nblocks * cnt);
8464	if (needj == 0)
8465		freedblocks += nblocks;
8466	freeblks_free(ump, freeblks, freedblocks);
8467	/*
8468	 * If we are journaling set up the ref counts and offset so this
8469	 * indirect can be completed when its children are free.
8470	 */
8471	if (needj) {
8472		ACQUIRE_LOCK(ump);
8473		freework->fw_off = i;
8474		freework->fw_ref += freedeps;
8475		freework->fw_ref -= NINDIR(fs) + 1;
8476		if (level == 0)
8477			freeblks->fb_cgwait += freedeps;
8478		if (freework->fw_ref == 0)
8479			freework_freeblock(freework, SINGLETON_KEY);
8480		FREE_LOCK(ump);
8481		return;
8482	}
8483	/*
8484	 * If we're not journaling we can free the indirect now.
8485	 */
8486	dbn = dbtofsb(fs, dbn);
8487	CTR3(KTR_SUJ,
8488	    "indir_trunc 2: ino %jd blkno %jd size %d",
8489	    freeblks->fb_inum, dbn, fs->fs_bsize);
8490	ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8491	    freeblks->fb_inum, freeblks->fb_vtype, NULL, SINGLETON_KEY);
8492	/* Non SUJ softdep does single-threaded truncations. */
8493	if (freework->fw_blkno == dbn) {
8494		freework->fw_state |= ALLCOMPLETE;
8495		ACQUIRE_LOCK(ump);
8496		handle_written_freework(freework);
8497		FREE_LOCK(ump);
8498	}
8499	return;
8500}
8501
8502/*
8503 * Cancel an allocindir when it is removed via truncation.  When bp is not
8504 * NULL the indirect never appeared on disk and is scheduled to be freed
8505 * independently of the indir so we can more easily track journal work.
8506 */
8507static void
8508cancel_allocindir(
8509	struct allocindir *aip,
8510	struct buf *bp,
8511	struct freeblks *freeblks,
8512	int trunc)
8513{
8514	struct indirdep *indirdep;
8515	struct freefrag *freefrag;
8516	struct newblk *newblk;
8517
8518	newblk = (struct newblk *)aip;
8519	LIST_REMOVE(aip, ai_next);
8520	/*
8521	 * We must eliminate the pointer in bp if it must be freed on its
8522	 * own due to partial truncate or pending journal work.
8523	 */
8524	if (bp && (trunc || newblk->nb_jnewblk)) {
8525		/*
8526		 * Clear the pointer and mark the aip to be freed
8527		 * directly if it never existed on disk.
8528		 */
8529		aip->ai_state |= DELAYEDFREE;
8530		indirdep = aip->ai_indirdep;
8531		if (indirdep->ir_state & UFS1FMT)
8532			((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8533		else
8534			((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8535	}
8536	/*
8537	 * When truncating the previous pointer will be freed via
8538	 * savedbp.  Eliminate the freefrag which would dup free.
8539	 */
8540	if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8541		newblk->nb_freefrag = NULL;
8542		if (freefrag->ff_jdep)
8543			cancel_jfreefrag(
8544			    WK_JFREEFRAG(freefrag->ff_jdep));
8545		jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8546		WORKITEM_FREE(freefrag, D_FREEFRAG);
8547	}
8548	/*
8549	 * If the journal hasn't been written the jnewblk must be passed
8550	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
8551	 * this by leaving the journal dependency on the newblk to be freed
8552	 * when a freework is created in handle_workitem_freeblocks().
8553	 */
8554	cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8555	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8556}
8557
8558/*
8559 * Create the mkdir dependencies for . and .. in a new directory.  Link them
8560 * in to a newdirblk so any subsequent additions are tracked properly.  The
8561 * caller is responsible for adding the mkdir1 dependency to the journal
8562 * and updating id_mkdiradd.  This function returns with the per-filesystem
8563 * lock held.
8564 */
8565static struct mkdir *
8566setup_newdir(
8567	struct diradd *dap,
8568	ino_t newinum,
8569	ino_t dinum,
8570	struct buf *newdirbp,
8571	struct mkdir **mkdirp)
8572{
8573	struct newblk *newblk;
8574	struct pagedep *pagedep;
8575	struct inodedep *inodedep;
8576	struct newdirblk *newdirblk;
8577	struct mkdir *mkdir1, *mkdir2;
8578	struct worklist *wk;
8579	struct jaddref *jaddref;
8580	struct ufsmount *ump;
8581	struct mount *mp;
8582
8583	mp = dap->da_list.wk_mp;
8584	ump = VFSTOUFS(mp);
8585	newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8586	    M_SOFTDEP_FLAGS);
8587	workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8588	LIST_INIT(&newdirblk->db_mkdir);
8589	mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8590	workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8591	mkdir1->md_state = ATTACHED | MKDIR_BODY;
8592	mkdir1->md_diradd = dap;
8593	mkdir1->md_jaddref = NULL;
8594	mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8595	workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8596	mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8597	mkdir2->md_diradd = dap;
8598	mkdir2->md_jaddref = NULL;
8599	if (MOUNTEDSUJ(mp) == 0) {
8600		mkdir1->md_state |= DEPCOMPLETE;
8601		mkdir2->md_state |= DEPCOMPLETE;
8602	}
8603	/*
8604	 * Dependency on "." and ".." being written to disk.
8605	 */
8606	mkdir1->md_buf = newdirbp;
8607	ACQUIRE_LOCK(VFSTOUFS(mp));
8608	LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir1, md_mkdirs);
8609	/*
8610	 * We must link the pagedep, allocdirect, and newdirblk for
8611	 * the initial file page so the pointer to the new directory
8612	 * is not written until the directory contents are live and
8613	 * any subsequent additions are not marked live until the
8614	 * block is reachable via the inode.
8615	 */
8616	if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8617		panic("setup_newdir: lost pagedep");
8618	LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8619		if (wk->wk_type == D_ALLOCDIRECT)
8620			break;
8621	if (wk == NULL)
8622		panic("setup_newdir: lost allocdirect");
8623	if (pagedep->pd_state & NEWBLOCK)
8624		panic("setup_newdir: NEWBLOCK already set");
8625	newblk = WK_NEWBLK(wk);
8626	pagedep->pd_state |= NEWBLOCK;
8627	pagedep->pd_newdirblk = newdirblk;
8628	newdirblk->db_pagedep = pagedep;
8629	WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8630	WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8631	/*
8632	 * Look up the inodedep for the parent directory so that we
8633	 * can link mkdir2 into the pending dotdot jaddref or
8634	 * the inode write if there is none.  If the inode is
8635	 * ALLCOMPLETE and no jaddref is present all dependencies have
8636	 * been satisfied and mkdir2 can be freed.
8637	 */
8638	inodedep_lookup(mp, dinum, 0, &inodedep);
8639	if (MOUNTEDSUJ(mp)) {
8640		if (inodedep == NULL)
8641			panic("setup_newdir: Lost parent.");
8642		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8643		    inoreflst);
8644		KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8645		    (jaddref->ja_state & MKDIR_PARENT),
8646		    ("setup_newdir: bad dotdot jaddref %p", jaddref));
8647		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8648		mkdir2->md_jaddref = jaddref;
8649		jaddref->ja_mkdir = mkdir2;
8650	} else if (inodedep == NULL ||
8651	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8652		dap->da_state &= ~MKDIR_PARENT;
8653		WORKITEM_FREE(mkdir2, D_MKDIR);
8654		mkdir2 = NULL;
8655	} else {
8656		LIST_INSERT_HEAD(&ump->softdep_mkdirlisthd, mkdir2, md_mkdirs);
8657		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8658	}
8659	*mkdirp = mkdir2;
8660
8661	return (mkdir1);
8662}
8663
8664/*
8665 * Directory entry addition dependencies.
8666 *
8667 * When adding a new directory entry, the inode (with its incremented link
8668 * count) must be written to disk before the directory entry's pointer to it.
8669 * Also, if the inode is newly allocated, the corresponding freemap must be
8670 * updated (on disk) before the directory entry's pointer. These requirements
8671 * are met via undo/redo on the directory entry's pointer, which consists
8672 * simply of the inode number.
8673 *
8674 * As directory entries are added and deleted, the free space within a
8675 * directory block can become fragmented.  The ufs filesystem will compact
8676 * a fragmented directory block to make space for a new entry. When this
8677 * occurs, the offsets of previously added entries change. Any "diradd"
8678 * dependency structures corresponding to these entries must be updated with
8679 * the new offsets.
8680 */
8681
8682/*
8683 * This routine is called after the in-memory inode's link
8684 * count has been incremented, but before the directory entry's
8685 * pointer to the inode has been set.
8686 */
8687int
8688softdep_setup_directory_add(
8689	struct buf *bp,		/* buffer containing directory block */
8690	struct inode *dp,	/* inode for directory */
8691	off_t diroffset,	/* offset of new entry in directory */
8692	ino_t newinum,		/* inode referenced by new directory entry */
8693	struct buf *newdirbp,	/* non-NULL => contents of new mkdir */
8694	int isnewblk)		/* entry is in a newly allocated block */
8695{
8696	int offset;		/* offset of new entry within directory block */
8697	ufs_lbn_t lbn;		/* block in directory containing new entry */
8698	struct fs *fs;
8699	struct diradd *dap;
8700	struct newblk *newblk;
8701	struct pagedep *pagedep;
8702	struct inodedep *inodedep;
8703	struct newdirblk *newdirblk;
8704	struct mkdir *mkdir1, *mkdir2;
8705	struct jaddref *jaddref;
8706	struct ufsmount *ump;
8707	struct mount *mp;
8708	int isindir;
8709
8710	mp = ITOVFS(dp);
8711	ump = VFSTOUFS(mp);
8712	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8713	    ("softdep_setup_directory_add called on non-softdep filesystem"));
8714	/*
8715	 * Whiteouts have no dependencies.
8716	 */
8717	if (newinum == UFS_WINO) {
8718		if (newdirbp != NULL)
8719			bdwrite(newdirbp);
8720		return (0);
8721	}
8722	jaddref = NULL;
8723	mkdir1 = mkdir2 = NULL;
8724	fs = ump->um_fs;
8725	lbn = lblkno(fs, diroffset);
8726	offset = blkoff(fs, diroffset);
8727	dap = malloc(sizeof(struct diradd), M_DIRADD,
8728		M_SOFTDEP_FLAGS|M_ZERO);
8729	workitem_alloc(&dap->da_list, D_DIRADD, mp);
8730	dap->da_offset = offset;
8731	dap->da_newinum = newinum;
8732	dap->da_state = ATTACHED;
8733	LIST_INIT(&dap->da_jwork);
8734	isindir = bp->b_lblkno >= UFS_NDADDR;
8735	newdirblk = NULL;
8736	if (isnewblk &&
8737	    (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8738		newdirblk = malloc(sizeof(struct newdirblk),
8739		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8740		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8741		LIST_INIT(&newdirblk->db_mkdir);
8742	}
8743	/*
8744	 * If we're creating a new directory setup the dependencies and set
8745	 * the dap state to wait for them.  Otherwise it's COMPLETE and
8746	 * we can move on.
8747	 */
8748	if (newdirbp == NULL) {
8749		dap->da_state |= DEPCOMPLETE;
8750		ACQUIRE_LOCK(ump);
8751	} else {
8752		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8753		mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8754		    &mkdir2);
8755	}
8756	/*
8757	 * Link into parent directory pagedep to await its being written.
8758	 */
8759	pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8760#ifdef INVARIANTS
8761	if (diradd_lookup(pagedep, offset) != NULL)
8762		panic("softdep_setup_directory_add: %p already at off %d\n",
8763		    diradd_lookup(pagedep, offset), offset);
8764#endif
8765	dap->da_pagedep = pagedep;
8766	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8767	    da_pdlist);
8768	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
8769	/*
8770	 * If we're journaling, link the diradd into the jaddref so it
8771	 * may be completed after the journal entry is written.  Otherwise,
8772	 * link the diradd into its inodedep.  If the inode is not yet
8773	 * written place it on the bufwait list, otherwise do the post-inode
8774	 * write processing to put it on the id_pendinghd list.
8775	 */
8776	if (MOUNTEDSUJ(mp)) {
8777		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8778		    inoreflst);
8779		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8780		    ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8781		jaddref->ja_diroff = diroffset;
8782		jaddref->ja_diradd = dap;
8783		add_to_journal(&jaddref->ja_list);
8784	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8785		diradd_inode_written(dap, inodedep);
8786	else
8787		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8788	/*
8789	 * Add the journal entries for . and .. links now that the primary
8790	 * link is written.
8791	 */
8792	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8793		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8794		    inoreflst, if_deps);
8795		KASSERT(jaddref != NULL &&
8796		    jaddref->ja_ino == jaddref->ja_parent &&
8797		    (jaddref->ja_state & MKDIR_BODY),
8798		    ("softdep_setup_directory_add: bad dot jaddref %p",
8799		    jaddref));
8800		mkdir1->md_jaddref = jaddref;
8801		jaddref->ja_mkdir = mkdir1;
8802		/*
8803		 * It is important that the dotdot journal entry
8804		 * is added prior to the dot entry since dot writes
8805		 * both the dot and dotdot links.  These both must
8806		 * be added after the primary link for the journal
8807		 * to remain consistent.
8808		 */
8809		add_to_journal(&mkdir2->md_jaddref->ja_list);
8810		add_to_journal(&jaddref->ja_list);
8811	}
8812	/*
8813	 * If we are adding a new directory remember this diradd so that if
8814	 * we rename it we can keep the dot and dotdot dependencies.  If
8815	 * we are adding a new name for an inode that has a mkdiradd we
8816	 * must be in rename and we have to move the dot and dotdot
8817	 * dependencies to this new name.  The old name is being orphaned
8818	 * soon.
8819	 */
8820	if (mkdir1 != NULL) {
8821		if (inodedep->id_mkdiradd != NULL)
8822			panic("softdep_setup_directory_add: Existing mkdir");
8823		inodedep->id_mkdiradd = dap;
8824	} else if (inodedep->id_mkdiradd)
8825		merge_diradd(inodedep, dap);
8826	if (newdirblk != NULL) {
8827		/*
8828		 * There is nothing to do if we are already tracking
8829		 * this block.
8830		 */
8831		if ((pagedep->pd_state & NEWBLOCK) != 0) {
8832			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8833			FREE_LOCK(ump);
8834			return (0);
8835		}
8836		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8837		    == 0)
8838			panic("softdep_setup_directory_add: lost entry");
8839		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8840		pagedep->pd_state |= NEWBLOCK;
8841		pagedep->pd_newdirblk = newdirblk;
8842		newdirblk->db_pagedep = pagedep;
8843		FREE_LOCK(ump);
8844		/*
8845		 * If we extended into an indirect signal direnter to sync.
8846		 */
8847		if (isindir)
8848			return (1);
8849		return (0);
8850	}
8851	FREE_LOCK(ump);
8852	return (0);
8853}
8854
8855/*
8856 * This procedure is called to change the offset of a directory
8857 * entry when compacting a directory block which must be owned
8858 * exclusively by the caller. Note that the actual entry movement
8859 * must be done in this procedure to ensure that no I/O completions
8860 * occur while the move is in progress.
8861 */
8862void
8863softdep_change_directoryentry_offset(
8864	struct buf *bp,		/* Buffer holding directory block. */
8865	struct inode *dp,	/* inode for directory */
8866	caddr_t base,		/* address of dp->i_offset */
8867	caddr_t oldloc,		/* address of old directory location */
8868	caddr_t newloc,		/* address of new directory location */
8869	int entrysize)		/* size of directory entry */
8870{
8871	int offset, oldoffset, newoffset;
8872	struct pagedep *pagedep;
8873	struct jmvref *jmvref;
8874	struct diradd *dap;
8875	struct direct *de;
8876	struct mount *mp;
8877	struct ufsmount *ump;
8878	ufs_lbn_t lbn;
8879	int flags;
8880
8881	mp = ITOVFS(dp);
8882	ump = VFSTOUFS(mp);
8883	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8884	    ("softdep_change_directoryentry_offset called on "
8885	     "non-softdep filesystem"));
8886	de = (struct direct *)oldloc;
8887	jmvref = NULL;
8888	flags = 0;
8889	/*
8890	 * Moves are always journaled as it would be too complex to
8891	 * determine if any affected adds or removes are present in the
8892	 * journal.
8893	 */
8894	if (MOUNTEDSUJ(mp)) {
8895		flags = DEPALLOC;
8896		jmvref = newjmvref(dp, de->d_ino,
8897		    I_OFFSET(dp) + (oldloc - base),
8898		    I_OFFSET(dp) + (newloc - base));
8899	}
8900	lbn = lblkno(ump->um_fs, I_OFFSET(dp));
8901	offset = blkoff(ump->um_fs, I_OFFSET(dp));
8902	oldoffset = offset + (oldloc - base);
8903	newoffset = offset + (newloc - base);
8904	ACQUIRE_LOCK(ump);
8905	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8906		goto done;
8907	dap = diradd_lookup(pagedep, oldoffset);
8908	if (dap) {
8909		dap->da_offset = newoffset;
8910		newoffset = DIRADDHASH(newoffset);
8911		oldoffset = DIRADDHASH(oldoffset);
8912		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8913		    newoffset != oldoffset) {
8914			LIST_REMOVE(dap, da_pdlist);
8915			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8916			    dap, da_pdlist);
8917		}
8918	}
8919done:
8920	if (jmvref) {
8921		jmvref->jm_pagedep = pagedep;
8922		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8923		add_to_journal(&jmvref->jm_list);
8924	}
8925	bcopy(oldloc, newloc, entrysize);
8926	FREE_LOCK(ump);
8927}
8928
8929/*
8930 * Move the mkdir dependencies and journal work from one diradd to another
8931 * when renaming a directory.  The new name must depend on the mkdir deps
8932 * completing as the old name did.  Directories can only have one valid link
8933 * at a time so one must be canonical.
8934 */
8935static void
8936merge_diradd(struct inodedep *inodedep, struct diradd *newdap)
8937{
8938	struct diradd *olddap;
8939	struct mkdir *mkdir, *nextmd;
8940	struct ufsmount *ump;
8941	short state;
8942
8943	olddap = inodedep->id_mkdiradd;
8944	inodedep->id_mkdiradd = newdap;
8945	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8946		newdap->da_state &= ~DEPCOMPLETE;
8947		ump = VFSTOUFS(inodedep->id_list.wk_mp);
8948		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
8949		     mkdir = nextmd) {
8950			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8951			if (mkdir->md_diradd != olddap)
8952				continue;
8953			mkdir->md_diradd = newdap;
8954			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8955			newdap->da_state |= state;
8956			olddap->da_state &= ~state;
8957			if ((olddap->da_state &
8958			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8959				break;
8960		}
8961		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8962			panic("merge_diradd: unfound ref");
8963	}
8964	/*
8965	 * Any mkdir related journal items are not safe to be freed until
8966	 * the new name is stable.
8967	 */
8968	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8969	olddap->da_state |= DEPCOMPLETE;
8970	complete_diradd(olddap);
8971}
8972
8973/*
8974 * Move the diradd to the pending list when all diradd dependencies are
8975 * complete.
8976 */
8977static void
8978complete_diradd(struct diradd *dap)
8979{
8980	struct pagedep *pagedep;
8981
8982	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8983		if (dap->da_state & DIRCHG)
8984			pagedep = dap->da_previous->dm_pagedep;
8985		else
8986			pagedep = dap->da_pagedep;
8987		LIST_REMOVE(dap, da_pdlist);
8988		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8989	}
8990}
8991
8992/*
8993 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
8994 * add entries and conditionally journal the remove.
8995 */
8996static void
8997cancel_diradd(
8998	struct diradd *dap,
8999	struct dirrem *dirrem,
9000	struct jremref *jremref,
9001	struct jremref *dotremref,
9002	struct jremref *dotdotremref)
9003{
9004	struct inodedep *inodedep;
9005	struct jaddref *jaddref;
9006	struct inoref *inoref;
9007	struct ufsmount *ump;
9008	struct mkdir *mkdir;
9009
9010	/*
9011	 * If no remove references were allocated we're on a non-journaled
9012	 * filesystem and can skip the cancel step.
9013	 */
9014	if (jremref == NULL) {
9015		free_diradd(dap, NULL);
9016		return;
9017	}
9018	/*
9019	 * Cancel the primary name an free it if it does not require
9020	 * journaling.
9021	 */
9022	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
9023	    0, &inodedep) != 0) {
9024		/* Abort the addref that reference this diradd.  */
9025		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
9026			if (inoref->if_list.wk_type != D_JADDREF)
9027				continue;
9028			jaddref = (struct jaddref *)inoref;
9029			if (jaddref->ja_diradd != dap)
9030				continue;
9031			if (cancel_jaddref(jaddref, inodedep,
9032			    &dirrem->dm_jwork) == 0) {
9033				free_jremref(jremref);
9034				jremref = NULL;
9035			}
9036			break;
9037		}
9038	}
9039	/*
9040	 * Cancel subordinate names and free them if they do not require
9041	 * journaling.
9042	 */
9043	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
9044		ump = VFSTOUFS(dap->da_list.wk_mp);
9045		LIST_FOREACH(mkdir, &ump->softdep_mkdirlisthd, md_mkdirs) {
9046			if (mkdir->md_diradd != dap)
9047				continue;
9048			if ((jaddref = mkdir->md_jaddref) == NULL)
9049				continue;
9050			mkdir->md_jaddref = NULL;
9051			if (mkdir->md_state & MKDIR_PARENT) {
9052				if (cancel_jaddref(jaddref, NULL,
9053				    &dirrem->dm_jwork) == 0) {
9054					free_jremref(dotdotremref);
9055					dotdotremref = NULL;
9056				}
9057			} else {
9058				if (cancel_jaddref(jaddref, inodedep,
9059				    &dirrem->dm_jwork) == 0) {
9060					free_jremref(dotremref);
9061					dotremref = NULL;
9062				}
9063			}
9064		}
9065	}
9066
9067	if (jremref)
9068		journal_jremref(dirrem, jremref, inodedep);
9069	if (dotremref)
9070		journal_jremref(dirrem, dotremref, inodedep);
9071	if (dotdotremref)
9072		journal_jremref(dirrem, dotdotremref, NULL);
9073	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
9074	free_diradd(dap, &dirrem->dm_jwork);
9075}
9076
9077/*
9078 * Free a diradd dependency structure.
9079 */
9080static void
9081free_diradd(struct diradd *dap, struct workhead *wkhd)
9082{
9083	struct dirrem *dirrem;
9084	struct pagedep *pagedep;
9085	struct inodedep *inodedep;
9086	struct mkdir *mkdir, *nextmd;
9087	struct ufsmount *ump;
9088
9089	ump = VFSTOUFS(dap->da_list.wk_mp);
9090	LOCK_OWNED(ump);
9091	LIST_REMOVE(dap, da_pdlist);
9092	if (dap->da_state & ONWORKLIST)
9093		WORKLIST_REMOVE(&dap->da_list);
9094	if ((dap->da_state & DIRCHG) == 0) {
9095		pagedep = dap->da_pagedep;
9096	} else {
9097		dirrem = dap->da_previous;
9098		pagedep = dirrem->dm_pagedep;
9099		dirrem->dm_dirinum = pagedep->pd_ino;
9100		dirrem->dm_state |= COMPLETE;
9101		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9102			add_to_worklist(&dirrem->dm_list, 0);
9103	}
9104	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
9105	    0, &inodedep) != 0)
9106		if (inodedep->id_mkdiradd == dap)
9107			inodedep->id_mkdiradd = NULL;
9108	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
9109		for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9110		     mkdir = nextmd) {
9111			nextmd = LIST_NEXT(mkdir, md_mkdirs);
9112			if (mkdir->md_diradd != dap)
9113				continue;
9114			dap->da_state &=
9115			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
9116			LIST_REMOVE(mkdir, md_mkdirs);
9117			if (mkdir->md_state & ONWORKLIST)
9118				WORKLIST_REMOVE(&mkdir->md_list);
9119			if (mkdir->md_jaddref != NULL)
9120				panic("free_diradd: Unexpected jaddref");
9121			WORKITEM_FREE(mkdir, D_MKDIR);
9122			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
9123				break;
9124		}
9125		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
9126			panic("free_diradd: unfound ref");
9127	}
9128	if (inodedep)
9129		free_inodedep(inodedep);
9130	/*
9131	 * Free any journal segments waiting for the directory write.
9132	 */
9133	handle_jwork(&dap->da_jwork);
9134	WORKITEM_FREE(dap, D_DIRADD);
9135}
9136
9137/*
9138 * Directory entry removal dependencies.
9139 *
9140 * When removing a directory entry, the entry's inode pointer must be
9141 * zero'ed on disk before the corresponding inode's link count is decremented
9142 * (possibly freeing the inode for re-use). This dependency is handled by
9143 * updating the directory entry but delaying the inode count reduction until
9144 * after the directory block has been written to disk. After this point, the
9145 * inode count can be decremented whenever it is convenient.
9146 */
9147
9148/*
9149 * This routine should be called immediately after removing
9150 * a directory entry.  The inode's link count should not be
9151 * decremented by the calling procedure -- the soft updates
9152 * code will do this task when it is safe.
9153 */
9154void
9155softdep_setup_remove(
9156	struct buf *bp,		/* buffer containing directory block */
9157	struct inode *dp,	/* inode for the directory being modified */
9158	struct inode *ip,	/* inode for directory entry being removed */
9159	int isrmdir)		/* indicates if doing RMDIR */
9160{
9161	struct dirrem *dirrem, *prevdirrem;
9162	struct inodedep *inodedep;
9163	struct ufsmount *ump;
9164	int direct;
9165
9166	ump = ITOUMP(ip);
9167	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9168	    ("softdep_setup_remove called on non-softdep filesystem"));
9169	/*
9170	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
9171	 * newdirrem() to setup the full directory remove which requires
9172	 * isrmdir > 1.
9173	 */
9174	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9175	/*
9176	 * Add the dirrem to the inodedep's pending remove list for quick
9177	 * discovery later.
9178	 */
9179	if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0)
9180		panic("softdep_setup_remove: Lost inodedep.");
9181	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
9182	dirrem->dm_state |= ONDEPLIST;
9183	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9184
9185	/*
9186	 * If the COMPLETE flag is clear, then there were no active
9187	 * entries and we want to roll back to a zeroed entry until
9188	 * the new inode is committed to disk. If the COMPLETE flag is
9189	 * set then we have deleted an entry that never made it to
9190	 * disk. If the entry we deleted resulted from a name change,
9191	 * then the old name still resides on disk. We cannot delete
9192	 * its inode (returned to us in prevdirrem) until the zeroed
9193	 * directory entry gets to disk. The new inode has never been
9194	 * referenced on the disk, so can be deleted immediately.
9195	 */
9196	if ((dirrem->dm_state & COMPLETE) == 0) {
9197		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
9198		    dm_next);
9199		FREE_LOCK(ump);
9200	} else {
9201		if (prevdirrem != NULL)
9202			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
9203			    prevdirrem, dm_next);
9204		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
9205		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
9206		FREE_LOCK(ump);
9207		if (direct)
9208			handle_workitem_remove(dirrem, 0);
9209	}
9210}
9211
9212/*
9213 * Check for an entry matching 'offset' on both the pd_dirraddhd list and the
9214 * pd_pendinghd list of a pagedep.
9215 */
9216static struct diradd *
9217diradd_lookup(struct pagedep *pagedep, int offset)
9218{
9219	struct diradd *dap;
9220
9221	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
9222		if (dap->da_offset == offset)
9223			return (dap);
9224	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
9225		if (dap->da_offset == offset)
9226			return (dap);
9227	return (NULL);
9228}
9229
9230/*
9231 * Search for a .. diradd dependency in a directory that is being removed.
9232 * If the directory was renamed to a new parent we have a diradd rather
9233 * than a mkdir for the .. entry.  We need to cancel it now before
9234 * it is found in truncate().
9235 */
9236static struct jremref *
9237cancel_diradd_dotdot(struct inode *ip,
9238	struct dirrem *dirrem,
9239	struct jremref *jremref)
9240{
9241	struct pagedep *pagedep;
9242	struct diradd *dap;
9243	struct worklist *wk;
9244
9245	if (pagedep_lookup(ITOVFS(ip), NULL, ip->i_number, 0, 0, &pagedep) == 0)
9246		return (jremref);
9247	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
9248	if (dap == NULL)
9249		return (jremref);
9250	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
9251	/*
9252	 * Mark any journal work as belonging to the parent so it is freed
9253	 * with the .. reference.
9254	 */
9255	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9256		wk->wk_state |= MKDIR_PARENT;
9257	return (NULL);
9258}
9259
9260/*
9261 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
9262 * replace it with a dirrem/diradd pair as a result of re-parenting a
9263 * directory.  This ensures that we don't simultaneously have a mkdir and
9264 * a diradd for the same .. entry.
9265 */
9266static struct jremref *
9267cancel_mkdir_dotdot(struct inode *ip,
9268	struct dirrem *dirrem,
9269	struct jremref *jremref)
9270{
9271	struct inodedep *inodedep;
9272	struct jaddref *jaddref;
9273	struct ufsmount *ump;
9274	struct mkdir *mkdir;
9275	struct diradd *dap;
9276	struct mount *mp;
9277
9278	mp = ITOVFS(ip);
9279	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9280		return (jremref);
9281	dap = inodedep->id_mkdiradd;
9282	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
9283		return (jremref);
9284	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9285	for (mkdir = LIST_FIRST(&ump->softdep_mkdirlisthd); mkdir;
9286	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
9287		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
9288			break;
9289	if (mkdir == NULL)
9290		panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
9291	if ((jaddref = mkdir->md_jaddref) != NULL) {
9292		mkdir->md_jaddref = NULL;
9293		jaddref->ja_state &= ~MKDIR_PARENT;
9294		if (inodedep_lookup(mp, jaddref->ja_ino, 0, &inodedep) == 0)
9295			panic("cancel_mkdir_dotdot: Lost parent inodedep");
9296		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
9297			journal_jremref(dirrem, jremref, inodedep);
9298			jremref = NULL;
9299		}
9300	}
9301	if (mkdir->md_state & ONWORKLIST)
9302		WORKLIST_REMOVE(&mkdir->md_list);
9303	mkdir->md_state |= ALLCOMPLETE;
9304	complete_mkdir(mkdir);
9305	return (jremref);
9306}
9307
9308static void
9309journal_jremref(struct dirrem *dirrem,
9310	struct jremref *jremref,
9311	struct inodedep *inodedep)
9312{
9313
9314	if (inodedep == NULL)
9315		if (inodedep_lookup(jremref->jr_list.wk_mp,
9316		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
9317			panic("journal_jremref: Lost inodedep");
9318	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
9319	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
9320	add_to_journal(&jremref->jr_list);
9321}
9322
9323static void
9324dirrem_journal(
9325	struct dirrem *dirrem,
9326	struct jremref *jremref,
9327	struct jremref *dotremref,
9328	struct jremref *dotdotremref)
9329{
9330	struct inodedep *inodedep;
9331
9332	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
9333	    &inodedep) == 0)
9334		panic("dirrem_journal: Lost inodedep");
9335	journal_jremref(dirrem, jremref, inodedep);
9336	if (dotremref)
9337		journal_jremref(dirrem, dotremref, inodedep);
9338	if (dotdotremref)
9339		journal_jremref(dirrem, dotdotremref, NULL);
9340}
9341
9342/*
9343 * Allocate a new dirrem if appropriate and return it along with
9344 * its associated pagedep. Called without a lock, returns with lock.
9345 */
9346static struct dirrem *
9347newdirrem(
9348	struct buf *bp,		/* buffer containing directory block */
9349	struct inode *dp,	/* inode for the directory being modified */
9350	struct inode *ip,	/* inode for directory entry being removed */
9351	int isrmdir,		/* indicates if doing RMDIR */
9352	struct dirrem **prevdirremp) /* previously referenced inode, if any */
9353{
9354	int offset;
9355	ufs_lbn_t lbn;
9356	struct diradd *dap;
9357	struct dirrem *dirrem;
9358	struct pagedep *pagedep;
9359	struct jremref *jremref;
9360	struct jremref *dotremref;
9361	struct jremref *dotdotremref;
9362	struct vnode *dvp;
9363	struct ufsmount *ump;
9364
9365	/*
9366	 * Whiteouts have no deletion dependencies.
9367	 */
9368	if (ip == NULL)
9369		panic("newdirrem: whiteout");
9370	dvp = ITOV(dp);
9371	ump = ITOUMP(dp);
9372
9373	/*
9374	 * If the system is over its limit and our filesystem is
9375	 * responsible for more than our share of that usage and
9376	 * we are not a snapshot, request some inodedep cleanup.
9377	 * Limiting the number of dirrem structures will also limit
9378	 * the number of freefile and freeblks structures.
9379	 */
9380	ACQUIRE_LOCK(ump);
9381	if (!IS_SNAPSHOT(ip) && softdep_excess_items(ump, D_DIRREM))
9382		schedule_cleanup(UFSTOVFS(ump));
9383	else
9384		FREE_LOCK(ump);
9385	dirrem = malloc(sizeof(struct dirrem), M_DIRREM, M_SOFTDEP_FLAGS |
9386	    M_ZERO);
9387	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
9388	LIST_INIT(&dirrem->dm_jremrefhd);
9389	LIST_INIT(&dirrem->dm_jwork);
9390	dirrem->dm_state = isrmdir ? RMDIR : 0;
9391	dirrem->dm_oldinum = ip->i_number;
9392	*prevdirremp = NULL;
9393	/*
9394	 * Allocate remove reference structures to track journal write
9395	 * dependencies.  We will always have one for the link and
9396	 * when doing directories we will always have one more for dot.
9397	 * When renaming a directory we skip the dotdot link change so
9398	 * this is not needed.
9399	 */
9400	jremref = dotremref = dotdotremref = NULL;
9401	if (DOINGSUJ(dvp)) {
9402		if (isrmdir) {
9403			jremref = newjremref(dirrem, dp, ip, I_OFFSET(dp),
9404			    ip->i_effnlink + 2);
9405			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
9406			    ip->i_effnlink + 1);
9407			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
9408			    dp->i_effnlink + 1);
9409			dotdotremref->jr_state |= MKDIR_PARENT;
9410		} else
9411			jremref = newjremref(dirrem, dp, ip, I_OFFSET(dp),
9412			    ip->i_effnlink + 1);
9413	}
9414	ACQUIRE_LOCK(ump);
9415	lbn = lblkno(ump->um_fs, I_OFFSET(dp));
9416	offset = blkoff(ump->um_fs, I_OFFSET(dp));
9417	pagedep_lookup(UFSTOVFS(ump), bp, dp->i_number, lbn, DEPALLOC,
9418	    &pagedep);
9419	dirrem->dm_pagedep = pagedep;
9420	dirrem->dm_offset = offset;
9421	/*
9422	 * If we're renaming a .. link to a new directory, cancel any
9423	 * existing MKDIR_PARENT mkdir.  If it has already been canceled
9424	 * the jremref is preserved for any potential diradd in this
9425	 * location.  This can not coincide with a rmdir.
9426	 */
9427	if (I_OFFSET(dp) == DOTDOT_OFFSET) {
9428		if (isrmdir)
9429			panic("newdirrem: .. directory change during remove?");
9430		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
9431	}
9432	/*
9433	 * If we're removing a directory search for the .. dependency now and
9434	 * cancel it.  Any pending journal work will be added to the dirrem
9435	 * to be completed when the workitem remove completes.
9436	 */
9437	if (isrmdir)
9438		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
9439	/*
9440	 * Check for a diradd dependency for the same directory entry.
9441	 * If present, then both dependencies become obsolete and can
9442	 * be de-allocated.
9443	 */
9444	dap = diradd_lookup(pagedep, offset);
9445	if (dap == NULL) {
9446		/*
9447		 * Link the jremref structures into the dirrem so they are
9448		 * written prior to the pagedep.
9449		 */
9450		if (jremref)
9451			dirrem_journal(dirrem, jremref, dotremref,
9452			    dotdotremref);
9453		return (dirrem);
9454	}
9455	/*
9456	 * Must be ATTACHED at this point.
9457	 */
9458	if ((dap->da_state & ATTACHED) == 0)
9459		panic("newdirrem: not ATTACHED");
9460	if (dap->da_newinum != ip->i_number)
9461		panic("newdirrem: inum %ju should be %ju",
9462		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
9463	/*
9464	 * If we are deleting a changed name that never made it to disk,
9465	 * then return the dirrem describing the previous inode (which
9466	 * represents the inode currently referenced from this entry on disk).
9467	 */
9468	if ((dap->da_state & DIRCHG) != 0) {
9469		*prevdirremp = dap->da_previous;
9470		dap->da_state &= ~DIRCHG;
9471		dap->da_pagedep = pagedep;
9472	}
9473	/*
9474	 * We are deleting an entry that never made it to disk.
9475	 * Mark it COMPLETE so we can delete its inode immediately.
9476	 */
9477	dirrem->dm_state |= COMPLETE;
9478	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
9479#ifdef INVARIANTS
9480	if (isrmdir == 0) {
9481		struct worklist *wk;
9482
9483		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9484			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9485				panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
9486	}
9487#endif
9488
9489	return (dirrem);
9490}
9491
9492/*
9493 * Directory entry change dependencies.
9494 *
9495 * Changing an existing directory entry requires that an add operation
9496 * be completed first followed by a deletion. The semantics for the addition
9497 * are identical to the description of adding a new entry above except
9498 * that the rollback is to the old inode number rather than zero. Once
9499 * the addition dependency is completed, the removal is done as described
9500 * in the removal routine above.
9501 */
9502
9503/*
9504 * This routine should be called immediately after changing
9505 * a directory entry.  The inode's link count should not be
9506 * decremented by the calling procedure -- the soft updates
9507 * code will perform this task when it is safe.
9508 */
9509void
9510softdep_setup_directory_change(
9511	struct buf *bp,		/* buffer containing directory block */
9512	struct inode *dp,	/* inode for the directory being modified */
9513	struct inode *ip,	/* inode for directory entry being removed */
9514	ino_t newinum,		/* new inode number for changed entry */
9515	int isrmdir)		/* indicates if doing RMDIR */
9516{
9517	int offset;
9518	struct diradd *dap = NULL;
9519	struct dirrem *dirrem, *prevdirrem;
9520	struct pagedep *pagedep;
9521	struct inodedep *inodedep;
9522	struct jaddref *jaddref;
9523	struct mount *mp;
9524	struct ufsmount *ump;
9525
9526	mp = ITOVFS(dp);
9527	ump = VFSTOUFS(mp);
9528	offset = blkoff(ump->um_fs, I_OFFSET(dp));
9529	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
9530	   ("softdep_setup_directory_change called on non-softdep filesystem"));
9531
9532	/*
9533	 * Whiteouts do not need diradd dependencies.
9534	 */
9535	if (newinum != UFS_WINO) {
9536		dap = malloc(sizeof(struct diradd),
9537		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9538		workitem_alloc(&dap->da_list, D_DIRADD, mp);
9539		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9540		dap->da_offset = offset;
9541		dap->da_newinum = newinum;
9542		LIST_INIT(&dap->da_jwork);
9543	}
9544
9545	/*
9546	 * Allocate a new dirrem and ACQUIRE_LOCK.
9547	 */
9548	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9549	pagedep = dirrem->dm_pagedep;
9550	/*
9551	 * The possible values for isrmdir:
9552	 *	0 - non-directory file rename
9553	 *	1 - directory rename within same directory
9554	 *   inum - directory rename to new directory of given inode number
9555	 * When renaming to a new directory, we are both deleting and
9556	 * creating a new directory entry, so the link count on the new
9557	 * directory should not change. Thus we do not need the followup
9558	 * dirrem which is usually done in handle_workitem_remove. We set
9559	 * the DIRCHG flag to tell handle_workitem_remove to skip the
9560	 * followup dirrem.
9561	 */
9562	if (isrmdir > 1)
9563		dirrem->dm_state |= DIRCHG;
9564
9565	/*
9566	 * Whiteouts have no additional dependencies,
9567	 * so just put the dirrem on the correct list.
9568	 */
9569	if (newinum == UFS_WINO) {
9570		if ((dirrem->dm_state & COMPLETE) == 0) {
9571			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9572			    dm_next);
9573		} else {
9574			dirrem->dm_dirinum = pagedep->pd_ino;
9575			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9576				add_to_worklist(&dirrem->dm_list, 0);
9577		}
9578		FREE_LOCK(ump);
9579		return;
9580	}
9581	/*
9582	 * Add the dirrem to the inodedep's pending remove list for quick
9583	 * discovery later.  A valid nlinkdelta ensures that this lookup
9584	 * will not fail.
9585	 */
9586	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9587		panic("softdep_setup_directory_change: Lost inodedep.");
9588	dirrem->dm_state |= ONDEPLIST;
9589	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9590
9591	/*
9592	 * If the COMPLETE flag is clear, then there were no active
9593	 * entries and we want to roll back to the previous inode until
9594	 * the new inode is committed to disk. If the COMPLETE flag is
9595	 * set, then we have deleted an entry that never made it to disk.
9596	 * If the entry we deleted resulted from a name change, then the old
9597	 * inode reference still resides on disk. Any rollback that we do
9598	 * needs to be to that old inode (returned to us in prevdirrem). If
9599	 * the entry we deleted resulted from a create, then there is
9600	 * no entry on the disk, so we want to roll back to zero rather
9601	 * than the uncommitted inode. In either of the COMPLETE cases we
9602	 * want to immediately free the unwritten and unreferenced inode.
9603	 */
9604	if ((dirrem->dm_state & COMPLETE) == 0) {
9605		dap->da_previous = dirrem;
9606	} else {
9607		if (prevdirrem != NULL) {
9608			dap->da_previous = prevdirrem;
9609		} else {
9610			dap->da_state &= ~DIRCHG;
9611			dap->da_pagedep = pagedep;
9612		}
9613		dirrem->dm_dirinum = pagedep->pd_ino;
9614		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9615			add_to_worklist(&dirrem->dm_list, 0);
9616	}
9617	/*
9618	 * Lookup the jaddref for this journal entry.  We must finish
9619	 * initializing it and make the diradd write dependent on it.
9620	 * If we're not journaling, put it on the id_bufwait list if the
9621	 * inode is not yet written. If it is written, do the post-inode
9622	 * write processing to put it on the id_pendinghd list.
9623	 */
9624	inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
9625	if (MOUNTEDSUJ(mp)) {
9626		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9627		    inoreflst);
9628		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9629		    ("softdep_setup_directory_change: bad jaddref %p",
9630		    jaddref));
9631		jaddref->ja_diroff = I_OFFSET(dp);
9632		jaddref->ja_diradd = dap;
9633		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9634		    dap, da_pdlist);
9635		add_to_journal(&jaddref->ja_list);
9636	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9637		dap->da_state |= COMPLETE;
9638		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9639		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9640	} else {
9641		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9642		    dap, da_pdlist);
9643		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9644	}
9645	/*
9646	 * If we're making a new name for a directory that has not been
9647	 * committed when need to move the dot and dotdot references to
9648	 * this new name.
9649	 */
9650	if (inodedep->id_mkdiradd && I_OFFSET(dp) != DOTDOT_OFFSET)
9651		merge_diradd(inodedep, dap);
9652	FREE_LOCK(ump);
9653}
9654
9655/*
9656 * Called whenever the link count on an inode is changed.
9657 * It creates an inode dependency so that the new reference(s)
9658 * to the inode cannot be committed to disk until the updated
9659 * inode has been written.
9660 */
9661void
9662softdep_change_linkcnt(
9663	struct inode *ip)	/* the inode with the increased link count */
9664{
9665	struct inodedep *inodedep;
9666	struct ufsmount *ump;
9667
9668	ump = ITOUMP(ip);
9669	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9670	    ("softdep_change_linkcnt called on non-softdep filesystem"));
9671	ACQUIRE_LOCK(ump);
9672	inodedep_lookup(UFSTOVFS(ump), ip->i_number, DEPALLOC, &inodedep);
9673	if (ip->i_nlink < ip->i_effnlink)
9674		panic("softdep_change_linkcnt: bad delta");
9675	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9676	FREE_LOCK(ump);
9677}
9678
9679/*
9680 * Attach a sbdep dependency to the superblock buf so that we can keep
9681 * track of the head of the linked list of referenced but unlinked inodes.
9682 */
9683void
9684softdep_setup_sbupdate(
9685	struct ufsmount *ump,
9686	struct fs *fs,
9687	struct buf *bp)
9688{
9689	struct sbdep *sbdep;
9690	struct worklist *wk;
9691
9692	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9693	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
9694	LIST_FOREACH(wk, &bp->b_dep, wk_list)
9695		if (wk->wk_type == D_SBDEP)
9696			break;
9697	if (wk != NULL)
9698		return;
9699	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9700	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9701	sbdep->sb_fs = fs;
9702	sbdep->sb_ump = ump;
9703	ACQUIRE_LOCK(ump);
9704	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9705	FREE_LOCK(ump);
9706}
9707
9708/*
9709 * Return the first unlinked inodedep which is ready to be the head of the
9710 * list.  The inodedep and all those after it must have valid next pointers.
9711 */
9712static struct inodedep *
9713first_unlinked_inodedep(struct ufsmount *ump)
9714{
9715	struct inodedep *inodedep;
9716	struct inodedep *idp;
9717
9718	LOCK_OWNED(ump);
9719	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9720	    inodedep; inodedep = idp) {
9721		if ((inodedep->id_state & UNLINKNEXT) == 0)
9722			return (NULL);
9723		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9724		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9725			break;
9726		if ((inodedep->id_state & UNLINKPREV) == 0)
9727			break;
9728	}
9729	return (inodedep);
9730}
9731
9732/*
9733 * Set the sujfree unlinked head pointer prior to writing a superblock.
9734 */
9735static void
9736initiate_write_sbdep(struct sbdep *sbdep)
9737{
9738	struct inodedep *inodedep;
9739	struct fs *bpfs;
9740	struct fs *fs;
9741
9742	bpfs = sbdep->sb_fs;
9743	fs = sbdep->sb_ump->um_fs;
9744	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9745	if (inodedep) {
9746		fs->fs_sujfree = inodedep->id_ino;
9747		inodedep->id_state |= UNLINKPREV;
9748	} else
9749		fs->fs_sujfree = 0;
9750	bpfs->fs_sujfree = fs->fs_sujfree;
9751	/*
9752	 * Because we have made changes to the superblock, we need to
9753	 * recompute its check-hash.
9754	 */
9755	bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
9756}
9757
9758/*
9759 * After a superblock is written determine whether it must be written again
9760 * due to a changing unlinked list head.
9761 */
9762static int
9763handle_written_sbdep(struct sbdep *sbdep, struct buf *bp)
9764{
9765	struct inodedep *inodedep;
9766	struct fs *fs;
9767
9768	LOCK_OWNED(sbdep->sb_ump);
9769	fs = sbdep->sb_fs;
9770	/*
9771	 * If the superblock doesn't match the in-memory list start over.
9772	 */
9773	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9774	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9775	    (inodedep == NULL && fs->fs_sujfree != 0)) {
9776		bdirty(bp);
9777		return (1);
9778	}
9779	WORKITEM_FREE(sbdep, D_SBDEP);
9780	if (fs->fs_sujfree == 0)
9781		return (0);
9782	/*
9783	 * Now that we have a record of this inode in stable store allow it
9784	 * to be written to free up pending work.  Inodes may see a lot of
9785	 * write activity after they are unlinked which we must not hold up.
9786	 */
9787	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9788		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9789			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9790			    inodedep, inodedep->id_state);
9791		if (inodedep->id_state & UNLINKONLIST)
9792			break;
9793		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9794	}
9795
9796	return (0);
9797}
9798
9799/*
9800 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9801 */
9802static void
9803unlinked_inodedep( struct mount *mp, struct inodedep *inodedep)
9804{
9805	struct ufsmount *ump;
9806
9807	ump = VFSTOUFS(mp);
9808	LOCK_OWNED(ump);
9809	if (MOUNTEDSUJ(mp) == 0)
9810		return;
9811	ump->um_fs->fs_fmod = 1;
9812	if (inodedep->id_state & UNLINKED)
9813		panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9814	inodedep->id_state |= UNLINKED;
9815	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9816}
9817
9818/*
9819 * Remove an inodedep from the unlinked inodedep list.  This may require
9820 * disk writes if the inode has made it that far.
9821 */
9822static void
9823clear_unlinked_inodedep( struct inodedep *inodedep)
9824{
9825	struct ufs2_dinode *dip;
9826	struct ufsmount *ump;
9827	struct inodedep *idp;
9828	struct inodedep *idn;
9829	struct fs *fs, *bpfs;
9830	struct buf *bp;
9831	daddr_t dbn;
9832	ino_t ino;
9833	ino_t nino;
9834	ino_t pino;
9835	int error;
9836
9837	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9838	fs = ump->um_fs;
9839	ino = inodedep->id_ino;
9840	error = 0;
9841	for (;;) {
9842		LOCK_OWNED(ump);
9843		KASSERT((inodedep->id_state & UNLINKED) != 0,
9844		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9845		    inodedep));
9846		/*
9847		 * If nothing has yet been written simply remove us from
9848		 * the in memory list and return.  This is the most common
9849		 * case where handle_workitem_remove() loses the final
9850		 * reference.
9851		 */
9852		if ((inodedep->id_state & UNLINKLINKS) == 0)
9853			break;
9854		/*
9855		 * If we have a NEXT pointer and no PREV pointer we can simply
9856		 * clear NEXT's PREV and remove ourselves from the list.  Be
9857		 * careful not to clear PREV if the superblock points at
9858		 * next as well.
9859		 */
9860		idn = TAILQ_NEXT(inodedep, id_unlinked);
9861		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9862			if (idn && fs->fs_sujfree != idn->id_ino)
9863				idn->id_state &= ~UNLINKPREV;
9864			break;
9865		}
9866		/*
9867		 * Here we have an inodedep which is actually linked into
9868		 * the list.  We must remove it by forcing a write to the
9869		 * link before us, whether it be the superblock or an inode.
9870		 * Unfortunately the list may change while we're waiting
9871		 * on the buf lock for either resource so we must loop until
9872		 * we lock the right one.  If both the superblock and an
9873		 * inode point to this inode we must clear the inode first
9874		 * followed by the superblock.
9875		 */
9876		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9877		pino = 0;
9878		if (idp && (idp->id_state & UNLINKNEXT))
9879			pino = idp->id_ino;
9880		FREE_LOCK(ump);
9881		if (pino == 0) {
9882			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9883			    (int)fs->fs_sbsize, 0, 0, 0);
9884		} else {
9885			dbn = fsbtodb(fs, ino_to_fsba(fs, pino));
9886			error = ffs_breadz(ump, ump->um_devvp, dbn, dbn,
9887			    (int)fs->fs_bsize, NULL, NULL, 0, NOCRED, 0, NULL,
9888			    &bp);
9889		}
9890		ACQUIRE_LOCK(ump);
9891		if (error)
9892			break;
9893		/* If the list has changed restart the loop. */
9894		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9895		nino = 0;
9896		if (idp && (idp->id_state & UNLINKNEXT))
9897			nino = idp->id_ino;
9898		if (nino != pino ||
9899		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9900			FREE_LOCK(ump);
9901			brelse(bp);
9902			ACQUIRE_LOCK(ump);
9903			continue;
9904		}
9905		nino = 0;
9906		idn = TAILQ_NEXT(inodedep, id_unlinked);
9907		if (idn)
9908			nino = idn->id_ino;
9909		/*
9910		 * Remove us from the in memory list.  After this we cannot
9911		 * access the inodedep.
9912		 */
9913		KASSERT((inodedep->id_state & UNLINKED) != 0,
9914		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9915		    inodedep));
9916		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9917		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9918		FREE_LOCK(ump);
9919		/*
9920		 * The predecessor's next pointer is manually updated here
9921		 * so that the NEXT flag is never cleared for an element
9922		 * that is in the list.
9923		 */
9924		if (pino == 0) {
9925			bcopy((caddr_t)fs, bp->b_data, (uint64_t)fs->fs_sbsize);
9926			bpfs = (struct fs *)bp->b_data;
9927			ffs_oldfscompat_write(bpfs, ump);
9928			softdep_setup_sbupdate(ump, bpfs, bp);
9929			/*
9930			 * Because we may have made changes to the superblock,
9931			 * we need to recompute its check-hash.
9932			 */
9933			bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
9934		} else if (fs->fs_magic == FS_UFS1_MAGIC) {
9935			((struct ufs1_dinode *)bp->b_data +
9936			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9937		} else {
9938			dip = (struct ufs2_dinode *)bp->b_data +
9939			    ino_to_fsbo(fs, pino);
9940			dip->di_freelink = nino;
9941			ffs_update_dinode_ckhash(fs, dip);
9942		}
9943		/*
9944		 * If the bwrite fails we have no recourse to recover.  The
9945		 * filesystem is corrupted already.
9946		 */
9947		bwrite(bp);
9948		ACQUIRE_LOCK(ump);
9949		/*
9950		 * If the superblock pointer still needs to be cleared force
9951		 * a write here.
9952		 */
9953		if (fs->fs_sujfree == ino) {
9954			FREE_LOCK(ump);
9955			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9956			    (int)fs->fs_sbsize, 0, 0, 0);
9957			bcopy((caddr_t)fs, bp->b_data, (uint64_t)fs->fs_sbsize);
9958			bpfs = (struct fs *)bp->b_data;
9959			ffs_oldfscompat_write(bpfs, ump);
9960			softdep_setup_sbupdate(ump, bpfs, bp);
9961			/*
9962			 * Because we may have made changes to the superblock,
9963			 * we need to recompute its check-hash.
9964			 */
9965			bpfs->fs_ckhash = ffs_calc_sbhash(bpfs);
9966			bwrite(bp);
9967			ACQUIRE_LOCK(ump);
9968		}
9969
9970		if (fs->fs_sujfree != ino)
9971			return;
9972		panic("clear_unlinked_inodedep: Failed to clear free head");
9973	}
9974	if (inodedep->id_ino == fs->fs_sujfree)
9975		panic("clear_unlinked_inodedep: Freeing head of free list");
9976	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9977	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9978	return;
9979}
9980
9981/*
9982 * This workitem decrements the inode's link count.
9983 * If the link count reaches zero, the file is removed.
9984 */
9985static int
9986handle_workitem_remove(struct dirrem *dirrem, int flags)
9987{
9988	struct inodedep *inodedep;
9989	struct workhead dotdotwk;
9990	struct worklist *wk;
9991	struct ufsmount *ump;
9992	struct mount *mp;
9993	struct vnode *vp;
9994	struct inode *ip;
9995	ino_t oldinum;
9996
9997	if (dirrem->dm_state & ONWORKLIST)
9998		panic("handle_workitem_remove: dirrem %p still on worklist",
9999		    dirrem);
10000	oldinum = dirrem->dm_oldinum;
10001	mp = dirrem->dm_list.wk_mp;
10002	ump = VFSTOUFS(mp);
10003	flags |= LK_EXCLUSIVE;
10004	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ |
10005	    FFSV_FORCEINODEDEP) != 0)
10006		return (EBUSY);
10007	ip = VTOI(vp);
10008	MPASS(ip->i_mode != 0);
10009	ACQUIRE_LOCK(ump);
10010	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
10011		panic("handle_workitem_remove: lost inodedep");
10012	if (dirrem->dm_state & ONDEPLIST)
10013		LIST_REMOVE(dirrem, dm_inonext);
10014	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
10015	    ("handle_workitem_remove:  Journal entries not written."));
10016
10017	/*
10018	 * Move all dependencies waiting on the remove to complete
10019	 * from the dirrem to the inode inowait list to be completed
10020	 * after the inode has been updated and written to disk.
10021	 *
10022	 * Any marked MKDIR_PARENT are saved to be completed when the
10023	 * dotdot ref is removed unless DIRCHG is specified.  For
10024	 * directory change operations there will be no further
10025	 * directory writes and the jsegdeps need to be moved along
10026	 * with the rest to be completed when the inode is free or
10027	 * stable in the inode free list.
10028	 */
10029	LIST_INIT(&dotdotwk);
10030	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
10031		WORKLIST_REMOVE(wk);
10032		if ((dirrem->dm_state & DIRCHG) == 0 &&
10033		    wk->wk_state & MKDIR_PARENT) {
10034			wk->wk_state &= ~MKDIR_PARENT;
10035			WORKLIST_INSERT(&dotdotwk, wk);
10036			continue;
10037		}
10038		WORKLIST_INSERT(&inodedep->id_inowait, wk);
10039	}
10040	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
10041	/*
10042	 * Normal file deletion.
10043	 */
10044	if ((dirrem->dm_state & RMDIR) == 0) {
10045		ip->i_nlink--;
10046		KASSERT(ip->i_nlink >= 0, ("handle_workitem_remove: file ino "
10047		    "%ju negative i_nlink %d", (intmax_t)ip->i_number,
10048		    ip->i_nlink));
10049		DIP_SET_NLINK(ip, ip->i_nlink);
10050		UFS_INODE_SET_FLAG(ip, IN_CHANGE);
10051		if (ip->i_nlink < ip->i_effnlink)
10052			panic("handle_workitem_remove: bad file delta");
10053		if (ip->i_nlink == 0)
10054			unlinked_inodedep(mp, inodedep);
10055		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
10056		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
10057		    ("handle_workitem_remove: worklist not empty. %s",
10058		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
10059		WORKITEM_FREE(dirrem, D_DIRREM);
10060		FREE_LOCK(ump);
10061		goto out;
10062	}
10063	/*
10064	 * Directory deletion. Decrement reference count for both the
10065	 * just deleted parent directory entry and the reference for ".".
10066	 * Arrange to have the reference count on the parent decremented
10067	 * to account for the loss of "..".
10068	 */
10069	ip->i_nlink -= 2;
10070	KASSERT(ip->i_nlink >= 0, ("handle_workitem_remove: directory ino "
10071	    "%ju negative i_nlink %d", (intmax_t)ip->i_number, ip->i_nlink));
10072	DIP_SET_NLINK(ip, ip->i_nlink);
10073	UFS_INODE_SET_FLAG(ip, IN_CHANGE);
10074	if (ip->i_nlink < ip->i_effnlink)
10075		panic("handle_workitem_remove: bad dir delta");
10076	if (ip->i_nlink == 0)
10077		unlinked_inodedep(mp, inodedep);
10078	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
10079	/*
10080	 * Rename a directory to a new parent. Since, we are both deleting
10081	 * and creating a new directory entry, the link count on the new
10082	 * directory should not change. Thus we skip the followup dirrem.
10083	 */
10084	if (dirrem->dm_state & DIRCHG) {
10085		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
10086		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
10087		WORKITEM_FREE(dirrem, D_DIRREM);
10088		FREE_LOCK(ump);
10089		goto out;
10090	}
10091	dirrem->dm_state = ONDEPLIST;
10092	dirrem->dm_oldinum = dirrem->dm_dirinum;
10093	/*
10094	 * Place the dirrem on the parent's diremhd list.
10095	 */
10096	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
10097		panic("handle_workitem_remove: lost dir inodedep");
10098	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
10099	/*
10100	 * If the allocated inode has never been written to disk, then
10101	 * the on-disk inode is zero'ed and we can remove the file
10102	 * immediately.  When journaling if the inode has been marked
10103	 * unlinked and not DEPCOMPLETE we know it can never be written.
10104	 */
10105	inodedep_lookup(mp, oldinum, 0, &inodedep);
10106	if (inodedep == NULL ||
10107	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
10108	    check_inode_unwritten(inodedep)) {
10109		FREE_LOCK(ump);
10110		vput(vp);
10111		return handle_workitem_remove(dirrem, flags);
10112	}
10113	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
10114	FREE_LOCK(ump);
10115	UFS_INODE_SET_FLAG(ip, IN_CHANGE);
10116out:
10117	ffs_update(vp, 0);
10118	vput(vp);
10119	return (0);
10120}
10121
10122/*
10123 * Inode de-allocation dependencies.
10124 *
10125 * When an inode's link count is reduced to zero, it can be de-allocated. We
10126 * found it convenient to postpone de-allocation until after the inode is
10127 * written to disk with its new link count (zero).  At this point, all of the
10128 * on-disk inode's block pointers are nullified and, with careful dependency
10129 * list ordering, all dependencies related to the inode will be satisfied and
10130 * the corresponding dependency structures de-allocated.  So, if/when the
10131 * inode is reused, there will be no mixing of old dependencies with new
10132 * ones.  This artificial dependency is set up by the block de-allocation
10133 * procedure above (softdep_setup_freeblocks) and completed by the
10134 * following procedure.
10135 */
10136static void
10137handle_workitem_freefile(struct freefile *freefile)
10138{
10139	struct workhead wkhd;
10140	struct fs *fs;
10141	struct ufsmount *ump;
10142	int error;
10143#ifdef INVARIANTS
10144	struct inodedep *idp;
10145#endif
10146
10147	ump = VFSTOUFS(freefile->fx_list.wk_mp);
10148	fs = ump->um_fs;
10149#ifdef INVARIANTS
10150	ACQUIRE_LOCK(ump);
10151	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
10152	FREE_LOCK(ump);
10153	if (error)
10154		panic("handle_workitem_freefile: inodedep %p survived", idp);
10155#endif
10156	UFS_LOCK(ump);
10157	fs->fs_pendinginodes -= 1;
10158	UFS_UNLOCK(ump);
10159	LIST_INIT(&wkhd);
10160	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
10161	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
10162	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
10163		softdep_error("handle_workitem_freefile", error);
10164	ACQUIRE_LOCK(ump);
10165	WORKITEM_FREE(freefile, D_FREEFILE);
10166	FREE_LOCK(ump);
10167}
10168
10169/*
10170 * Helper function which unlinks marker element from work list and returns
10171 * the next element on the list.
10172 */
10173static __inline struct worklist *
10174markernext(struct worklist *marker)
10175{
10176	struct worklist *next;
10177
10178	next = LIST_NEXT(marker, wk_list);
10179	LIST_REMOVE(marker, wk_list);
10180	return next;
10181}
10182
10183/*
10184 * Disk writes.
10185 *
10186 * The dependency structures constructed above are most actively used when file
10187 * system blocks are written to disk.  No constraints are placed on when a
10188 * block can be written, but unsatisfied update dependencies are made safe by
10189 * modifying (or replacing) the source memory for the duration of the disk
10190 * write.  When the disk write completes, the memory block is again brought
10191 * up-to-date.
10192 *
10193 * In-core inode structure reclamation.
10194 *
10195 * Because there are a finite number of "in-core" inode structures, they are
10196 * reused regularly.  By transferring all inode-related dependencies to the
10197 * in-memory inode block and indexing them separately (via "inodedep"s), we
10198 * can allow "in-core" inode structures to be reused at any time and avoid
10199 * any increase in contention.
10200 *
10201 * Called just before entering the device driver to initiate a new disk I/O.
10202 * The buffer must be locked, thus, no I/O completion operations can occur
10203 * while we are manipulating its associated dependencies.
10204 */
10205static void
10206softdep_disk_io_initiation(
10207	struct buf *bp)		/* structure describing disk write to occur */
10208{
10209	struct worklist *wk;
10210	struct worklist marker;
10211	struct inodedep *inodedep;
10212	struct freeblks *freeblks;
10213	struct jblkdep *jblkdep;
10214	struct newblk *newblk;
10215	struct ufsmount *ump;
10216
10217	/*
10218	 * We only care about write operations. There should never
10219	 * be dependencies for reads.
10220	 */
10221	if (bp->b_iocmd != BIO_WRITE)
10222		panic("softdep_disk_io_initiation: not write");
10223
10224	if (bp->b_vflags & BV_BKGRDINPROG)
10225		panic("softdep_disk_io_initiation: Writing buffer with "
10226		    "background write in progress: %p", bp);
10227
10228	ump = softdep_bp_to_mp(bp);
10229	if (ump == NULL)
10230		return;
10231
10232	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
10233	PHOLD(curproc);			/* Don't swap out kernel stack */
10234	ACQUIRE_LOCK(ump);
10235	/*
10236	 * Do any necessary pre-I/O processing.
10237	 */
10238	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
10239	     wk = markernext(&marker)) {
10240		LIST_INSERT_AFTER(wk, &marker, wk_list);
10241		switch (wk->wk_type) {
10242		case D_PAGEDEP:
10243			initiate_write_filepage(WK_PAGEDEP(wk), bp);
10244			continue;
10245
10246		case D_INODEDEP:
10247			inodedep = WK_INODEDEP(wk);
10248			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
10249				initiate_write_inodeblock_ufs1(inodedep, bp);
10250			else
10251				initiate_write_inodeblock_ufs2(inodedep, bp);
10252			continue;
10253
10254		case D_INDIRDEP:
10255			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
10256			continue;
10257
10258		case D_BMSAFEMAP:
10259			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
10260			continue;
10261
10262		case D_JSEG:
10263			WK_JSEG(wk)->js_buf = NULL;
10264			continue;
10265
10266		case D_FREEBLKS:
10267			freeblks = WK_FREEBLKS(wk);
10268			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
10269			/*
10270			 * We have to wait for the freeblks to be journaled
10271			 * before we can write an inodeblock with updated
10272			 * pointers.  Be careful to arrange the marker so
10273			 * we revisit the freeblks if it's not removed by
10274			 * the first jwait().
10275			 */
10276			if (jblkdep != NULL) {
10277				LIST_REMOVE(&marker, wk_list);
10278				LIST_INSERT_BEFORE(wk, &marker, wk_list);
10279				jwait(&jblkdep->jb_list, MNT_WAIT);
10280			}
10281			continue;
10282		case D_ALLOCDIRECT:
10283		case D_ALLOCINDIR:
10284			/*
10285			 * We have to wait for the jnewblk to be journaled
10286			 * before we can write to a block if the contents
10287			 * may be confused with an earlier file's indirect
10288			 * at recovery time.  Handle the marker as described
10289			 * above.
10290			 */
10291			newblk = WK_NEWBLK(wk);
10292			if (newblk->nb_jnewblk != NULL &&
10293			    indirblk_lookup(newblk->nb_list.wk_mp,
10294			    newblk->nb_newblkno)) {
10295				LIST_REMOVE(&marker, wk_list);
10296				LIST_INSERT_BEFORE(wk, &marker, wk_list);
10297				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
10298			}
10299			continue;
10300
10301		case D_SBDEP:
10302			initiate_write_sbdep(WK_SBDEP(wk));
10303			continue;
10304
10305		case D_MKDIR:
10306		case D_FREEWORK:
10307		case D_FREEDEP:
10308		case D_JSEGDEP:
10309			continue;
10310
10311		default:
10312			panic("handle_disk_io_initiation: Unexpected type %s",
10313			    TYPENAME(wk->wk_type));
10314			/* NOTREACHED */
10315		}
10316	}
10317	FREE_LOCK(ump);
10318	PRELE(curproc);			/* Allow swapout of kernel stack */
10319}
10320
10321/*
10322 * Called from within the procedure above to deal with unsatisfied
10323 * allocation dependencies in a directory. The buffer must be locked,
10324 * thus, no I/O completion operations can occur while we are
10325 * manipulating its associated dependencies.
10326 */
10327static void
10328initiate_write_filepage(struct pagedep *pagedep, struct buf *bp)
10329{
10330	struct jremref *jremref;
10331	struct jmvref *jmvref;
10332	struct dirrem *dirrem;
10333	struct diradd *dap;
10334	struct direct *ep;
10335	int i;
10336
10337	if (pagedep->pd_state & IOSTARTED) {
10338		/*
10339		 * This can only happen if there is a driver that does not
10340		 * understand chaining. Here biodone will reissue the call
10341		 * to strategy for the incomplete buffers.
10342		 */
10343		printf("initiate_write_filepage: already started\n");
10344		return;
10345	}
10346	pagedep->pd_state |= IOSTARTED;
10347	/*
10348	 * Wait for all journal remove dependencies to hit the disk.
10349	 * We can not allow any potentially conflicting directory adds
10350	 * to be visible before removes and rollback is too difficult.
10351	 * The per-filesystem lock may be dropped and re-acquired, however
10352	 * we hold the buf locked so the dependency can not go away.
10353	 */
10354	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
10355		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
10356			jwait(&jremref->jr_list, MNT_WAIT);
10357	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
10358		jwait(&jmvref->jm_list, MNT_WAIT);
10359	for (i = 0; i < DAHASHSZ; i++) {
10360		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
10361			ep = (struct direct *)
10362			    ((char *)bp->b_data + dap->da_offset);
10363			if (ep->d_ino != dap->da_newinum)
10364				panic("%s: dir inum %ju != new %ju",
10365				    "initiate_write_filepage",
10366				    (uintmax_t)ep->d_ino,
10367				    (uintmax_t)dap->da_newinum);
10368			if (dap->da_state & DIRCHG)
10369				ep->d_ino = dap->da_previous->dm_oldinum;
10370			else
10371				ep->d_ino = 0;
10372			dap->da_state &= ~ATTACHED;
10373			dap->da_state |= UNDONE;
10374		}
10375	}
10376}
10377
10378/*
10379 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
10380 * Note that any bug fixes made to this routine must be done in the
10381 * version found below.
10382 *
10383 * Called from within the procedure above to deal with unsatisfied
10384 * allocation dependencies in an inodeblock. The buffer must be
10385 * locked, thus, no I/O completion operations can occur while we
10386 * are manipulating its associated dependencies.
10387 */
10388static void
10389initiate_write_inodeblock_ufs1(
10390	struct inodedep *inodedep,
10391	struct buf *bp)			/* The inode block */
10392{
10393	struct allocdirect *adp, *lastadp;
10394	struct ufs1_dinode *dp;
10395	struct ufs1_dinode *sip;
10396	struct inoref *inoref;
10397	struct ufsmount *ump;
10398	struct fs *fs;
10399	ufs_lbn_t i;
10400#ifdef INVARIANTS
10401	ufs_lbn_t prevlbn = 0;
10402#endif
10403	int deplist __diagused;
10404
10405	if (inodedep->id_state & IOSTARTED)
10406		panic("initiate_write_inodeblock_ufs1: already started");
10407	inodedep->id_state |= IOSTARTED;
10408	fs = inodedep->id_fs;
10409	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10410	LOCK_OWNED(ump);
10411	dp = (struct ufs1_dinode *)bp->b_data +
10412	    ino_to_fsbo(fs, inodedep->id_ino);
10413
10414	/*
10415	 * If we're on the unlinked list but have not yet written our
10416	 * next pointer initialize it here.
10417	 */
10418	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10419		struct inodedep *inon;
10420
10421		inon = TAILQ_NEXT(inodedep, id_unlinked);
10422		dp->di_freelink = inon ? inon->id_ino : 0;
10423	}
10424	/*
10425	 * If the bitmap is not yet written, then the allocated
10426	 * inode cannot be written to disk.
10427	 */
10428	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10429		if (inodedep->id_savedino1 != NULL)
10430			panic("initiate_write_inodeblock_ufs1: I/O underway");
10431		FREE_LOCK(ump);
10432		sip = malloc(sizeof(struct ufs1_dinode),
10433		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10434		ACQUIRE_LOCK(ump);
10435		inodedep->id_savedino1 = sip;
10436		*inodedep->id_savedino1 = *dp;
10437		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
10438		dp->di_gen = inodedep->id_savedino1->di_gen;
10439		dp->di_freelink = inodedep->id_savedino1->di_freelink;
10440		return;
10441	}
10442	/*
10443	 * If no dependencies, then there is nothing to roll back.
10444	 */
10445	inodedep->id_savedsize = dp->di_size;
10446	inodedep->id_savedextsize = 0;
10447	inodedep->id_savednlink = dp->di_nlink;
10448	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10449	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10450		return;
10451	/*
10452	 * Revert the link count to that of the first unwritten journal entry.
10453	 */
10454	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10455	if (inoref)
10456		dp->di_nlink = inoref->if_nlink;
10457	/*
10458	 * Set the dependencies to busy.
10459	 */
10460	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10461	     adp = TAILQ_NEXT(adp, ad_next)) {
10462#ifdef INVARIANTS
10463		if (deplist != 0 && prevlbn >= adp->ad_offset)
10464			panic("softdep_write_inodeblock: lbn order");
10465		prevlbn = adp->ad_offset;
10466		if (adp->ad_offset < UFS_NDADDR &&
10467		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10468			panic("initiate_write_inodeblock_ufs1: "
10469			    "direct pointer #%jd mismatch %d != %jd",
10470			    (intmax_t)adp->ad_offset,
10471			    dp->di_db[adp->ad_offset],
10472			    (intmax_t)adp->ad_newblkno);
10473		if (adp->ad_offset >= UFS_NDADDR &&
10474		    dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
10475			panic("initiate_write_inodeblock_ufs1: "
10476			    "indirect pointer #%jd mismatch %d != %jd",
10477			    (intmax_t)adp->ad_offset - UFS_NDADDR,
10478			    dp->di_ib[adp->ad_offset - UFS_NDADDR],
10479			    (intmax_t)adp->ad_newblkno);
10480		deplist |= 1 << adp->ad_offset;
10481		if ((adp->ad_state & ATTACHED) == 0)
10482			panic("initiate_write_inodeblock_ufs1: "
10483			    "Unknown state 0x%x", adp->ad_state);
10484#endif /* INVARIANTS */
10485		adp->ad_state &= ~ATTACHED;
10486		adp->ad_state |= UNDONE;
10487	}
10488	/*
10489	 * The on-disk inode cannot claim to be any larger than the last
10490	 * fragment that has been written. Otherwise, the on-disk inode
10491	 * might have fragments that were not the last block in the file
10492	 * which would corrupt the filesystem.
10493	 */
10494	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10495	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10496		if (adp->ad_offset >= UFS_NDADDR)
10497			break;
10498		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10499		/* keep going until hitting a rollback to a frag */
10500		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10501			continue;
10502		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10503		for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
10504#ifdef INVARIANTS
10505			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10506				panic("initiate_write_inodeblock_ufs1: "
10507				    "lost dep1");
10508#endif /* INVARIANTS */
10509			dp->di_db[i] = 0;
10510		}
10511		for (i = 0; i < UFS_NIADDR; i++) {
10512#ifdef INVARIANTS
10513			if (dp->di_ib[i] != 0 &&
10514			    (deplist & ((1 << UFS_NDADDR) << i)) == 0)
10515				panic("initiate_write_inodeblock_ufs1: "
10516				    "lost dep2");
10517#endif /* INVARIANTS */
10518			dp->di_ib[i] = 0;
10519		}
10520		return;
10521	}
10522	/*
10523	 * If we have zero'ed out the last allocated block of the file,
10524	 * roll back the size to the last currently allocated block.
10525	 * We know that this last allocated block is a full-sized as
10526	 * we already checked for fragments in the loop above.
10527	 */
10528	if (lastadp != NULL &&
10529	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10530		for (i = lastadp->ad_offset; i >= 0; i--)
10531			if (dp->di_db[i] != 0)
10532				break;
10533		dp->di_size = (i + 1) * fs->fs_bsize;
10534	}
10535	/*
10536	 * The only dependencies are for indirect blocks.
10537	 *
10538	 * The file size for indirect block additions is not guaranteed.
10539	 * Such a guarantee would be non-trivial to achieve. The conventional
10540	 * synchronous write implementation also does not make this guarantee.
10541	 * Fsck should catch and fix discrepancies. Arguably, the file size
10542	 * can be over-estimated without destroying integrity when the file
10543	 * moves into the indirect blocks (i.e., is large). If we want to
10544	 * postpone fsck, we are stuck with this argument.
10545	 */
10546	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10547		dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
10548}
10549
10550/*
10551 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10552 * Note that any bug fixes made to this routine must be done in the
10553 * version found above.
10554 *
10555 * Called from within the procedure above to deal with unsatisfied
10556 * allocation dependencies in an inodeblock. The buffer must be
10557 * locked, thus, no I/O completion operations can occur while we
10558 * are manipulating its associated dependencies.
10559 */
10560static void
10561initiate_write_inodeblock_ufs2(
10562	struct inodedep *inodedep,
10563	struct buf *bp)			/* The inode block */
10564{
10565	struct allocdirect *adp, *lastadp;
10566	struct ufs2_dinode *dp;
10567	struct ufs2_dinode *sip;
10568	struct inoref *inoref;
10569	struct ufsmount *ump;
10570	struct fs *fs;
10571	ufs_lbn_t i;
10572#ifdef INVARIANTS
10573	ufs_lbn_t prevlbn = 0;
10574#endif
10575	int deplist __diagused;
10576
10577	if (inodedep->id_state & IOSTARTED)
10578		panic("initiate_write_inodeblock_ufs2: already started");
10579	inodedep->id_state |= IOSTARTED;
10580	fs = inodedep->id_fs;
10581	ump = VFSTOUFS(inodedep->id_list.wk_mp);
10582	LOCK_OWNED(ump);
10583	dp = (struct ufs2_dinode *)bp->b_data +
10584	    ino_to_fsbo(fs, inodedep->id_ino);
10585
10586	/*
10587	 * If we're on the unlinked list but have not yet written our
10588	 * next pointer initialize it here.
10589	 */
10590	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10591		struct inodedep *inon;
10592
10593		inon = TAILQ_NEXT(inodedep, id_unlinked);
10594		dp->di_freelink = inon ? inon->id_ino : 0;
10595		ffs_update_dinode_ckhash(fs, dp);
10596	}
10597	/*
10598	 * If the bitmap is not yet written, then the allocated
10599	 * inode cannot be written to disk.
10600	 */
10601	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10602		if (inodedep->id_savedino2 != NULL)
10603			panic("initiate_write_inodeblock_ufs2: I/O underway");
10604		FREE_LOCK(ump);
10605		sip = malloc(sizeof(struct ufs2_dinode),
10606		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10607		ACQUIRE_LOCK(ump);
10608		inodedep->id_savedino2 = sip;
10609		*inodedep->id_savedino2 = *dp;
10610		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10611		dp->di_gen = inodedep->id_savedino2->di_gen;
10612		dp->di_freelink = inodedep->id_savedino2->di_freelink;
10613		return;
10614	}
10615	/*
10616	 * If no dependencies, then there is nothing to roll back.
10617	 */
10618	inodedep->id_savedsize = dp->di_size;
10619	inodedep->id_savedextsize = dp->di_extsize;
10620	inodedep->id_savednlink = dp->di_nlink;
10621	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10622	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
10623	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10624		return;
10625	/*
10626	 * Revert the link count to that of the first unwritten journal entry.
10627	 */
10628	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10629	if (inoref)
10630		dp->di_nlink = inoref->if_nlink;
10631
10632	/*
10633	 * Set the ext data dependencies to busy.
10634	 */
10635	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10636	     adp = TAILQ_NEXT(adp, ad_next)) {
10637#ifdef INVARIANTS
10638		if (deplist != 0 && prevlbn >= adp->ad_offset)
10639			panic("initiate_write_inodeblock_ufs2: lbn order");
10640		prevlbn = adp->ad_offset;
10641		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10642			panic("initiate_write_inodeblock_ufs2: "
10643			    "ext pointer #%jd mismatch %jd != %jd",
10644			    (intmax_t)adp->ad_offset,
10645			    (intmax_t)dp->di_extb[adp->ad_offset],
10646			    (intmax_t)adp->ad_newblkno);
10647		deplist |= 1 << adp->ad_offset;
10648		if ((adp->ad_state & ATTACHED) == 0)
10649			panic("initiate_write_inodeblock_ufs2: Unknown "
10650			    "state 0x%x", adp->ad_state);
10651#endif /* INVARIANTS */
10652		adp->ad_state &= ~ATTACHED;
10653		adp->ad_state |= UNDONE;
10654	}
10655	/*
10656	 * The on-disk inode cannot claim to be any larger than the last
10657	 * fragment that has been written. Otherwise, the on-disk inode
10658	 * might have fragments that were not the last block in the ext
10659	 * data which would corrupt the filesystem.
10660	 */
10661	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10662	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10663		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10664		/* keep going until hitting a rollback to a frag */
10665		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10666			continue;
10667		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10668		for (i = adp->ad_offset + 1; i < UFS_NXADDR; i++) {
10669#ifdef INVARIANTS
10670			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10671				panic("initiate_write_inodeblock_ufs2: "
10672				    "lost dep1");
10673#endif /* INVARIANTS */
10674			dp->di_extb[i] = 0;
10675		}
10676		lastadp = NULL;
10677		break;
10678	}
10679	/*
10680	 * If we have zero'ed out the last allocated block of the ext
10681	 * data, roll back the size to the last currently allocated block.
10682	 * We know that this last allocated block is a full-sized as
10683	 * we already checked for fragments in the loop above.
10684	 */
10685	if (lastadp != NULL &&
10686	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10687		for (i = lastadp->ad_offset; i >= 0; i--)
10688			if (dp->di_extb[i] != 0)
10689				break;
10690		dp->di_extsize = (i + 1) * fs->fs_bsize;
10691	}
10692	/*
10693	 * Set the file data dependencies to busy.
10694	 */
10695	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10696	     adp = TAILQ_NEXT(adp, ad_next)) {
10697#ifdef INVARIANTS
10698		if (deplist != 0 && prevlbn >= adp->ad_offset)
10699			panic("softdep_write_inodeblock: lbn order");
10700		if ((adp->ad_state & ATTACHED) == 0)
10701			panic("inodedep %p and adp %p not attached", inodedep, adp);
10702		prevlbn = adp->ad_offset;
10703		if (!ffs_fsfail_cleanup(ump, 0) &&
10704		    adp->ad_offset < UFS_NDADDR &&
10705		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10706			panic("initiate_write_inodeblock_ufs2: "
10707			    "direct pointer #%jd mismatch %jd != %jd",
10708			    (intmax_t)adp->ad_offset,
10709			    (intmax_t)dp->di_db[adp->ad_offset],
10710			    (intmax_t)adp->ad_newblkno);
10711		if (!ffs_fsfail_cleanup(ump, 0) &&
10712		    adp->ad_offset >= UFS_NDADDR &&
10713		    dp->di_ib[adp->ad_offset - UFS_NDADDR] != adp->ad_newblkno)
10714			panic("initiate_write_inodeblock_ufs2: "
10715			    "indirect pointer #%jd mismatch %jd != %jd",
10716			    (intmax_t)adp->ad_offset - UFS_NDADDR,
10717			    (intmax_t)dp->di_ib[adp->ad_offset - UFS_NDADDR],
10718			    (intmax_t)adp->ad_newblkno);
10719		deplist |= 1 << adp->ad_offset;
10720		if ((adp->ad_state & ATTACHED) == 0)
10721			panic("initiate_write_inodeblock_ufs2: Unknown "
10722			     "state 0x%x", adp->ad_state);
10723#endif /* INVARIANTS */
10724		adp->ad_state &= ~ATTACHED;
10725		adp->ad_state |= UNDONE;
10726	}
10727	/*
10728	 * The on-disk inode cannot claim to be any larger than the last
10729	 * fragment that has been written. Otherwise, the on-disk inode
10730	 * might have fragments that were not the last block in the file
10731	 * which would corrupt the filesystem.
10732	 */
10733	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10734	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10735		if (adp->ad_offset >= UFS_NDADDR)
10736			break;
10737		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10738		/* keep going until hitting a rollback to a frag */
10739		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10740			continue;
10741		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10742		for (i = adp->ad_offset + 1; i < UFS_NDADDR; i++) {
10743#ifdef INVARIANTS
10744			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10745				panic("initiate_write_inodeblock_ufs2: "
10746				    "lost dep2");
10747#endif /* INVARIANTS */
10748			dp->di_db[i] = 0;
10749		}
10750		for (i = 0; i < UFS_NIADDR; i++) {
10751#ifdef INVARIANTS
10752			if (dp->di_ib[i] != 0 &&
10753			    (deplist & ((1 << UFS_NDADDR) << i)) == 0)
10754				panic("initiate_write_inodeblock_ufs2: "
10755				    "lost dep3");
10756#endif /* INVARIANTS */
10757			dp->di_ib[i] = 0;
10758		}
10759		ffs_update_dinode_ckhash(fs, dp);
10760		return;
10761	}
10762	/*
10763	 * If we have zero'ed out the last allocated block of the file,
10764	 * roll back the size to the last currently allocated block.
10765	 * We know that this last allocated block is a full-sized as
10766	 * we already checked for fragments in the loop above.
10767	 */
10768	if (lastadp != NULL &&
10769	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10770		for (i = lastadp->ad_offset; i >= 0; i--)
10771			if (dp->di_db[i] != 0)
10772				break;
10773		dp->di_size = (i + 1) * fs->fs_bsize;
10774	}
10775	/*
10776	 * The only dependencies are for indirect blocks.
10777	 *
10778	 * The file size for indirect block additions is not guaranteed.
10779	 * Such a guarantee would be non-trivial to achieve. The conventional
10780	 * synchronous write implementation also does not make this guarantee.
10781	 * Fsck should catch and fix discrepancies. Arguably, the file size
10782	 * can be over-estimated without destroying integrity when the file
10783	 * moves into the indirect blocks (i.e., is large). If we want to
10784	 * postpone fsck, we are stuck with this argument.
10785	 */
10786	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10787		dp->di_ib[adp->ad_offset - UFS_NDADDR] = 0;
10788	ffs_update_dinode_ckhash(fs, dp);
10789}
10790
10791/*
10792 * Cancel an indirdep as a result of truncation.  Release all of the
10793 * children allocindirs and place their journal work on the appropriate
10794 * list.
10795 */
10796static void
10797cancel_indirdep(
10798	struct indirdep *indirdep,
10799	struct buf *bp,
10800	struct freeblks *freeblks)
10801{
10802	struct allocindir *aip;
10803
10804	/*
10805	 * None of the indirect pointers will ever be visible,
10806	 * so they can simply be tossed. GOINGAWAY ensures
10807	 * that allocated pointers will be saved in the buffer
10808	 * cache until they are freed. Note that they will
10809	 * only be able to be found by their physical address
10810	 * since the inode mapping the logical address will
10811	 * be gone. The save buffer used for the safe copy
10812	 * was allocated in setup_allocindir_phase2 using
10813	 * the physical address so it could be used for this
10814	 * purpose. Hence we swap the safe copy with the real
10815	 * copy, allowing the safe copy to be freed and holding
10816	 * on to the real copy for later use in indir_trunc.
10817	 */
10818	if (indirdep->ir_state & GOINGAWAY)
10819		panic("cancel_indirdep: already gone");
10820	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10821		indirdep->ir_state |= DEPCOMPLETE;
10822		LIST_REMOVE(indirdep, ir_next);
10823	}
10824	indirdep->ir_state |= GOINGAWAY;
10825	/*
10826	 * Pass in bp for blocks still have journal writes
10827	 * pending so we can cancel them on their own.
10828	 */
10829	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != NULL)
10830		cancel_allocindir(aip, bp, freeblks, 0);
10831	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL)
10832		cancel_allocindir(aip, NULL, freeblks, 0);
10833	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL)
10834		cancel_allocindir(aip, NULL, freeblks, 0);
10835	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL)
10836		cancel_allocindir(aip, NULL, freeblks, 0);
10837	/*
10838	 * If there are pending partial truncations we need to keep the
10839	 * old block copy around until they complete.  This is because
10840	 * the current b_data is not a perfect superset of the available
10841	 * blocks.
10842	 */
10843	if (TAILQ_EMPTY(&indirdep->ir_trunc))
10844		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10845	else
10846		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10847	WORKLIST_REMOVE(&indirdep->ir_list);
10848	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10849	indirdep->ir_bp = NULL;
10850	indirdep->ir_freeblks = freeblks;
10851}
10852
10853/*
10854 * Free an indirdep once it no longer has new pointers to track.
10855 */
10856static void
10857free_indirdep(struct indirdep *indirdep)
10858{
10859
10860	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10861	    ("free_indirdep: Indir trunc list not empty."));
10862	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10863	    ("free_indirdep: Complete head not empty."));
10864	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10865	    ("free_indirdep: write head not empty."));
10866	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10867	    ("free_indirdep: done head not empty."));
10868	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10869	    ("free_indirdep: deplist head not empty."));
10870	KASSERT((indirdep->ir_state & DEPCOMPLETE),
10871	    ("free_indirdep: %p still on newblk list.", indirdep));
10872	KASSERT(indirdep->ir_saveddata == NULL,
10873	    ("free_indirdep: %p still has saved data.", indirdep));
10874	KASSERT(indirdep->ir_savebp == NULL,
10875	    ("free_indirdep: %p still has savebp buffer.", indirdep));
10876	if (indirdep->ir_state & ONWORKLIST)
10877		WORKLIST_REMOVE(&indirdep->ir_list);
10878	WORKITEM_FREE(indirdep, D_INDIRDEP);
10879}
10880
10881/*
10882 * Called before a write to an indirdep.  This routine is responsible for
10883 * rolling back pointers to a safe state which includes only those
10884 * allocindirs which have been completed.
10885 */
10886static void
10887initiate_write_indirdep(struct indirdep *indirdep, struct buf *bp)
10888{
10889	struct ufsmount *ump;
10890
10891	indirdep->ir_state |= IOSTARTED;
10892	if (indirdep->ir_state & GOINGAWAY)
10893		panic("disk_io_initiation: indirdep gone");
10894	/*
10895	 * If there are no remaining dependencies, this will be writing
10896	 * the real pointers.
10897	 */
10898	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10899	    TAILQ_EMPTY(&indirdep->ir_trunc))
10900		return;
10901	/*
10902	 * Replace up-to-date version with safe version.
10903	 */
10904	if (indirdep->ir_saveddata == NULL) {
10905		ump = VFSTOUFS(indirdep->ir_list.wk_mp);
10906		LOCK_OWNED(ump);
10907		FREE_LOCK(ump);
10908		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10909		    M_SOFTDEP_FLAGS);
10910		ACQUIRE_LOCK(ump);
10911	}
10912	indirdep->ir_state &= ~ATTACHED;
10913	indirdep->ir_state |= UNDONE;
10914	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10915	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10916	    bp->b_bcount);
10917}
10918
10919/*
10920 * Called when an inode has been cleared in a cg bitmap.  This finally
10921 * eliminates any canceled jaddrefs
10922 */
10923void
10924softdep_setup_inofree(struct mount *mp,
10925	struct buf *bp,
10926	ino_t ino,
10927	struct workhead *wkhd,
10928	bool doingrecovery)
10929{
10930	struct worklist *wk, *wkn;
10931	struct ufsmount *ump;
10932#ifdef INVARIANTS
10933	struct inodedep *inodedep;
10934#endif
10935
10936	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
10937	    ("softdep_setup_inofree called on non-softdep filesystem"));
10938	ump = VFSTOUFS(mp);
10939	ACQUIRE_LOCK(ump);
10940	KASSERT(doingrecovery || ffs_fsfail_cleanup(ump, 0) ||
10941	    isclr(cg_inosused((struct cg *)bp->b_data),
10942	    ino % ump->um_fs->fs_ipg),
10943	    ("softdep_setup_inofree: inode %ju not freed.", (uintmax_t)ino));
10944	KASSERT(inodedep_lookup(mp, ino, 0, &inodedep) == 0,
10945	    ("softdep_setup_inofree: ino %ju has existing inodedep %p",
10946	    (uintmax_t)ino, inodedep));
10947	if (wkhd) {
10948		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10949			if (wk->wk_type != D_JADDREF)
10950				continue;
10951			WORKLIST_REMOVE(wk);
10952			/*
10953			 * We can free immediately even if the jaddref
10954			 * isn't attached in a background write as now
10955			 * the bitmaps are reconciled.
10956			 */
10957			wk->wk_state |= COMPLETE | ATTACHED;
10958			free_jaddref(WK_JADDREF(wk));
10959		}
10960		jwork_move(&bp->b_dep, wkhd);
10961	}
10962	FREE_LOCK(ump);
10963}
10964
10965/*
10966 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10967 * map.  Any dependencies waiting for the write to clear are added to the
10968 * buf's list and any jnewblks that are being canceled are discarded
10969 * immediately.
10970 */
10971void
10972softdep_setup_blkfree(
10973	struct mount *mp,
10974	struct buf *bp,
10975	ufs2_daddr_t blkno,
10976	int frags,
10977	struct workhead *wkhd,
10978	bool doingrecovery)
10979{
10980	struct bmsafemap *bmsafemap;
10981	struct jnewblk *jnewblk;
10982	struct ufsmount *ump;
10983	struct worklist *wk;
10984	struct fs *fs;
10985#ifdef INVARIANTS
10986	uint8_t *blksfree;
10987	struct cg *cgp;
10988	ufs2_daddr_t jstart;
10989	ufs2_daddr_t jend;
10990	ufs2_daddr_t end;
10991	long bno;
10992	int i;
10993#endif
10994
10995	CTR3(KTR_SUJ,
10996	    "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10997	    blkno, frags, wkhd);
10998
10999	ump = VFSTOUFS(mp);
11000	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
11001	    ("softdep_setup_blkfree called on non-softdep filesystem"));
11002	ACQUIRE_LOCK(ump);
11003	/* Lookup the bmsafemap so we track when it is dirty. */
11004	fs = ump->um_fs;
11005	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
11006	/*
11007	 * Detach any jnewblks which have been canceled.  They must linger
11008	 * until the bitmap is cleared again by ffs_blkfree() to prevent
11009	 * an unjournaled allocation from hitting the disk.
11010	 */
11011	if (wkhd) {
11012		while ((wk = LIST_FIRST(wkhd)) != NULL) {
11013			CTR2(KTR_SUJ,
11014			    "softdep_setup_blkfree: blkno %jd wk type %d",
11015			    blkno, wk->wk_type);
11016			WORKLIST_REMOVE(wk);
11017			if (wk->wk_type != D_JNEWBLK) {
11018				WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
11019				continue;
11020			}
11021			jnewblk = WK_JNEWBLK(wk);
11022			KASSERT(jnewblk->jn_state & GOINGAWAY,
11023			    ("softdep_setup_blkfree: jnewblk not canceled."));
11024#ifdef INVARIANTS
11025			if (!doingrecovery && !ffs_fsfail_cleanup(ump, 0)) {
11026				/*
11027				 * Assert that this block is free in the
11028				 * bitmap before we discard the jnewblk.
11029				 */
11030				cgp = (struct cg *)bp->b_data;
11031				blksfree = cg_blksfree(cgp);
11032				bno = dtogd(fs, jnewblk->jn_blkno);
11033				for (i = jnewblk->jn_oldfrags;
11034				    i < jnewblk->jn_frags; i++) {
11035					if (isset(blksfree, bno + i))
11036						continue;
11037					panic("softdep_setup_blkfree: block "
11038					    "%ju not freed.",
11039					    (uintmax_t)jnewblk->jn_blkno);
11040				}
11041			}
11042#endif
11043			/*
11044			 * Even if it's not attached we can free immediately
11045			 * as the new bitmap is correct.
11046			 */
11047			wk->wk_state |= COMPLETE | ATTACHED;
11048			free_jnewblk(jnewblk);
11049		}
11050	}
11051
11052#ifdef INVARIANTS
11053	/*
11054	 * Assert that we are not freeing a block which has an outstanding
11055	 * allocation dependency.
11056	 */
11057	fs = VFSTOUFS(mp)->um_fs;
11058	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
11059	end = blkno + frags;
11060	LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
11061		/*
11062		 * Don't match against blocks that will be freed when the
11063		 * background write is done.
11064		 */
11065		if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
11066		    (COMPLETE | DEPCOMPLETE))
11067			continue;
11068		jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
11069		jend = jnewblk->jn_blkno + jnewblk->jn_frags;
11070		if ((blkno >= jstart && blkno < jend) ||
11071		    (end > jstart && end <= jend)) {
11072			printf("state 0x%X %jd - %d %d dep %p\n",
11073			    jnewblk->jn_state, jnewblk->jn_blkno,
11074			    jnewblk->jn_oldfrags, jnewblk->jn_frags,
11075			    jnewblk->jn_dep);
11076			panic("softdep_setup_blkfree: "
11077			    "%jd-%jd(%d) overlaps with %jd-%jd",
11078			    blkno, end, frags, jstart, jend);
11079		}
11080	}
11081#endif
11082	FREE_LOCK(ump);
11083}
11084
11085/*
11086 * Revert a block allocation when the journal record that describes it
11087 * is not yet written.
11088 */
11089static int
11090jnewblk_rollback(
11091	struct jnewblk *jnewblk,
11092	struct fs *fs,
11093	struct cg *cgp,
11094	uint8_t *blksfree)
11095{
11096	ufs1_daddr_t fragno;
11097	long cgbno, bbase;
11098	int frags, blk;
11099	int i;
11100
11101	frags = 0;
11102	cgbno = dtogd(fs, jnewblk->jn_blkno);
11103	/*
11104	 * We have to test which frags need to be rolled back.  We may
11105	 * be operating on a stale copy when doing background writes.
11106	 */
11107	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
11108		if (isclr(blksfree, cgbno + i))
11109			frags++;
11110	if (frags == 0)
11111		return (0);
11112	/*
11113	 * This is mostly ffs_blkfree() sans some validation and
11114	 * superblock updates.
11115	 */
11116	if (frags == fs->fs_frag) {
11117		fragno = fragstoblks(fs, cgbno);
11118		ffs_setblock(fs, blksfree, fragno);
11119		ffs_clusteracct(fs, cgp, fragno, 1);
11120		cgp->cg_cs.cs_nbfree++;
11121	} else {
11122		cgbno += jnewblk->jn_oldfrags;
11123		bbase = cgbno - fragnum(fs, cgbno);
11124		/* Decrement the old frags.  */
11125		blk = blkmap(fs, blksfree, bbase);
11126		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11127		/* Deallocate the fragment */
11128		for (i = 0; i < frags; i++)
11129			setbit(blksfree, cgbno + i);
11130		cgp->cg_cs.cs_nffree += frags;
11131		/* Add back in counts associated with the new frags */
11132		blk = blkmap(fs, blksfree, bbase);
11133		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11134		/* If a complete block has been reassembled, account for it. */
11135		fragno = fragstoblks(fs, bbase);
11136		if (ffs_isblock(fs, blksfree, fragno)) {
11137			cgp->cg_cs.cs_nffree -= fs->fs_frag;
11138			ffs_clusteracct(fs, cgp, fragno, 1);
11139			cgp->cg_cs.cs_nbfree++;
11140		}
11141	}
11142	stat_jnewblk++;
11143	jnewblk->jn_state &= ~ATTACHED;
11144	jnewblk->jn_state |= UNDONE;
11145
11146	return (frags);
11147}
11148
11149static void
11150initiate_write_bmsafemap(
11151	struct bmsafemap *bmsafemap,
11152	struct buf *bp)			/* The cg block. */
11153{
11154	struct jaddref *jaddref;
11155	struct jnewblk *jnewblk;
11156	uint8_t *inosused;
11157	uint8_t *blksfree;
11158	struct cg *cgp;
11159	struct fs *fs;
11160	ino_t ino;
11161
11162	/*
11163	 * If this is a background write, we did this at the time that
11164	 * the copy was made, so do not need to do it again.
11165	 */
11166	if (bmsafemap->sm_state & IOSTARTED)
11167		return;
11168	bmsafemap->sm_state |= IOSTARTED;
11169	/*
11170	 * Clear any inode allocations which are pending journal writes.
11171	 */
11172	if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
11173		cgp = (struct cg *)bp->b_data;
11174		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11175		inosused = cg_inosused(cgp);
11176		LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
11177			ino = jaddref->ja_ino % fs->fs_ipg;
11178			if (isset(inosused, ino)) {
11179				if ((jaddref->ja_mode & IFMT) == IFDIR)
11180					cgp->cg_cs.cs_ndir--;
11181				cgp->cg_cs.cs_nifree++;
11182				clrbit(inosused, ino);
11183				jaddref->ja_state &= ~ATTACHED;
11184				jaddref->ja_state |= UNDONE;
11185				stat_jaddref++;
11186			} else
11187				panic("initiate_write_bmsafemap: inode %ju "
11188				    "marked free", (uintmax_t)jaddref->ja_ino);
11189		}
11190	}
11191	/*
11192	 * Clear any block allocations which are pending journal writes.
11193	 */
11194	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11195		cgp = (struct cg *)bp->b_data;
11196		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11197		blksfree = cg_blksfree(cgp);
11198		LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
11199			if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
11200				continue;
11201			panic("initiate_write_bmsafemap: block %jd "
11202			    "marked free", jnewblk->jn_blkno);
11203		}
11204	}
11205	/*
11206	 * Move allocation lists to the written lists so they can be
11207	 * cleared once the block write is complete.
11208	 */
11209	LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
11210	    inodedep, id_deps);
11211	LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
11212	    newblk, nb_deps);
11213	LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
11214	    wk_list);
11215}
11216
11217void
11218softdep_handle_error(struct buf *bp)
11219{
11220	struct ufsmount *ump;
11221
11222	ump = softdep_bp_to_mp(bp);
11223	if (ump == NULL)
11224		return;
11225
11226	if (ffs_fsfail_cleanup(ump, bp->b_error)) {
11227		/*
11228		 * No future writes will succeed, so the on-disk image is safe.
11229		 * Pretend that this write succeeded so that the softdep state
11230		 * will be cleaned up naturally.
11231		 */
11232		bp->b_ioflags &= ~BIO_ERROR;
11233		bp->b_error = 0;
11234	}
11235}
11236
11237/*
11238 * This routine is called during the completion interrupt
11239 * service routine for a disk write (from the procedure called
11240 * by the device driver to inform the filesystem caches of
11241 * a request completion).  It should be called early in this
11242 * procedure, before the block is made available to other
11243 * processes or other routines are called.
11244 *
11245 */
11246static void
11247softdep_disk_write_complete(
11248	struct buf *bp)		/* describes the completed disk write */
11249{
11250	struct worklist *wk;
11251	struct worklist *owk;
11252	struct ufsmount *ump;
11253	struct workhead reattach;
11254	struct freeblks *freeblks;
11255	struct buf *sbp;
11256
11257	ump = softdep_bp_to_mp(bp);
11258	KASSERT(LIST_EMPTY(&bp->b_dep) || ump != NULL,
11259	    ("softdep_disk_write_complete: softdep_bp_to_mp returned NULL "
11260	     "with outstanding dependencies for buffer %p", bp));
11261	if (ump == NULL)
11262		return;
11263	if ((bp->b_ioflags & BIO_ERROR) != 0)
11264		softdep_handle_error(bp);
11265	/*
11266	 * If an error occurred while doing the write, then the data
11267	 * has not hit the disk and the dependencies cannot be processed.
11268	 * But we do have to go through and roll forward any dependencies
11269	 * that were rolled back before the disk write.
11270	 */
11271	sbp = NULL;
11272	ACQUIRE_LOCK(ump);
11273	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0) {
11274		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
11275			switch (wk->wk_type) {
11276			case D_PAGEDEP:
11277				handle_written_filepage(WK_PAGEDEP(wk), bp, 0);
11278				continue;
11279
11280			case D_INODEDEP:
11281				handle_written_inodeblock(WK_INODEDEP(wk),
11282				    bp, 0);
11283				continue;
11284
11285			case D_BMSAFEMAP:
11286				handle_written_bmsafemap(WK_BMSAFEMAP(wk),
11287				    bp, 0);
11288				continue;
11289
11290			case D_INDIRDEP:
11291				handle_written_indirdep(WK_INDIRDEP(wk),
11292				    bp, &sbp, 0);
11293				continue;
11294			default:
11295				/* nothing to roll forward */
11296				continue;
11297			}
11298		}
11299		FREE_LOCK(ump);
11300		if (sbp)
11301			brelse(sbp);
11302		return;
11303	}
11304	LIST_INIT(&reattach);
11305
11306	/*
11307	 * Ump SU lock must not be released anywhere in this code segment.
11308	 */
11309	owk = NULL;
11310	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
11311		WORKLIST_REMOVE(wk);
11312		atomic_add_long(&dep_write[wk->wk_type], 1);
11313		if (wk == owk)
11314			panic("duplicate worklist: %p\n", wk);
11315		owk = wk;
11316		switch (wk->wk_type) {
11317		case D_PAGEDEP:
11318			if (handle_written_filepage(WK_PAGEDEP(wk), bp,
11319			    WRITESUCCEEDED))
11320				WORKLIST_INSERT(&reattach, wk);
11321			continue;
11322
11323		case D_INODEDEP:
11324			if (handle_written_inodeblock(WK_INODEDEP(wk), bp,
11325			    WRITESUCCEEDED))
11326				WORKLIST_INSERT(&reattach, wk);
11327			continue;
11328
11329		case D_BMSAFEMAP:
11330			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp,
11331			    WRITESUCCEEDED))
11332				WORKLIST_INSERT(&reattach, wk);
11333			continue;
11334
11335		case D_MKDIR:
11336			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
11337			continue;
11338
11339		case D_ALLOCDIRECT:
11340			wk->wk_state |= COMPLETE;
11341			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
11342			continue;
11343
11344		case D_ALLOCINDIR:
11345			wk->wk_state |= COMPLETE;
11346			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
11347			continue;
11348
11349		case D_INDIRDEP:
11350			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp,
11351			    WRITESUCCEEDED))
11352				WORKLIST_INSERT(&reattach, wk);
11353			continue;
11354
11355		case D_FREEBLKS:
11356			wk->wk_state |= COMPLETE;
11357			freeblks = WK_FREEBLKS(wk);
11358			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
11359			    LIST_EMPTY(&freeblks->fb_jblkdephd))
11360				add_to_worklist(wk, WK_NODELAY);
11361			continue;
11362
11363		case D_FREEWORK:
11364			handle_written_freework(WK_FREEWORK(wk));
11365			break;
11366
11367		case D_JSEGDEP:
11368			free_jsegdep(WK_JSEGDEP(wk));
11369			continue;
11370
11371		case D_JSEG:
11372			handle_written_jseg(WK_JSEG(wk), bp);
11373			continue;
11374
11375		case D_SBDEP:
11376			if (handle_written_sbdep(WK_SBDEP(wk), bp))
11377				WORKLIST_INSERT(&reattach, wk);
11378			continue;
11379
11380		case D_FREEDEP:
11381			free_freedep(WK_FREEDEP(wk));
11382			continue;
11383
11384		default:
11385			panic("handle_disk_write_complete: Unknown type %s",
11386			    TYPENAME(wk->wk_type));
11387			/* NOTREACHED */
11388		}
11389	}
11390	/*
11391	 * Reattach any requests that must be redone.
11392	 */
11393	while ((wk = LIST_FIRST(&reattach)) != NULL) {
11394		WORKLIST_REMOVE(wk);
11395		WORKLIST_INSERT(&bp->b_dep, wk);
11396	}
11397	FREE_LOCK(ump);
11398	if (sbp)
11399		brelse(sbp);
11400}
11401
11402/*
11403 * Called from within softdep_disk_write_complete above.
11404 */
11405static void
11406handle_allocdirect_partdone(
11407	struct allocdirect *adp,	/* the completed allocdirect */
11408	struct workhead *wkhd)		/* Work to do when inode is writtne. */
11409{
11410	struct allocdirectlst *listhead;
11411	struct allocdirect *listadp;
11412	struct inodedep *inodedep;
11413	long bsize;
11414
11415	LOCK_OWNED(VFSTOUFS(adp->ad_block.nb_list.wk_mp));
11416	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11417		return;
11418	/*
11419	 * The on-disk inode cannot claim to be any larger than the last
11420	 * fragment that has been written. Otherwise, the on-disk inode
11421	 * might have fragments that were not the last block in the file
11422	 * which would corrupt the filesystem. Thus, we cannot free any
11423	 * allocdirects after one whose ad_oldblkno claims a fragment as
11424	 * these blocks must be rolled back to zero before writing the inode.
11425	 * We check the currently active set of allocdirects in id_inoupdt
11426	 * or id_extupdt as appropriate.
11427	 */
11428	inodedep = adp->ad_inodedep;
11429	bsize = inodedep->id_fs->fs_bsize;
11430	if (adp->ad_state & EXTDATA)
11431		listhead = &inodedep->id_extupdt;
11432	else
11433		listhead = &inodedep->id_inoupdt;
11434	TAILQ_FOREACH(listadp, listhead, ad_next) {
11435		/* found our block */
11436		if (listadp == adp)
11437			break;
11438		/* continue if ad_oldlbn is not a fragment */
11439		if (listadp->ad_oldsize == 0 ||
11440		    listadp->ad_oldsize == bsize)
11441			continue;
11442		/* hit a fragment */
11443		return;
11444	}
11445	/*
11446	 * If we have reached the end of the current list without
11447	 * finding the just finished dependency, then it must be
11448	 * on the future dependency list. Future dependencies cannot
11449	 * be freed until they are moved to the current list.
11450	 */
11451	if (listadp == NULL) {
11452#ifdef INVARIANTS
11453		if (adp->ad_state & EXTDATA)
11454			listhead = &inodedep->id_newextupdt;
11455		else
11456			listhead = &inodedep->id_newinoupdt;
11457		TAILQ_FOREACH(listadp, listhead, ad_next)
11458			/* found our block */
11459			if (listadp == adp)
11460				break;
11461		if (listadp == NULL)
11462			panic("handle_allocdirect_partdone: lost dep");
11463#endif /* INVARIANTS */
11464		return;
11465	}
11466	/*
11467	 * If we have found the just finished dependency, then queue
11468	 * it along with anything that follows it that is complete.
11469	 * Since the pointer has not yet been written in the inode
11470	 * as the dependency prevents it, place the allocdirect on the
11471	 * bufwait list where it will be freed once the pointer is
11472	 * valid.
11473	 */
11474	if (wkhd == NULL)
11475		wkhd = &inodedep->id_bufwait;
11476	for (; adp; adp = listadp) {
11477		listadp = TAILQ_NEXT(adp, ad_next);
11478		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
11479			return;
11480		TAILQ_REMOVE(listhead, adp, ad_next);
11481		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
11482	}
11483}
11484
11485/*
11486 * Called from within softdep_disk_write_complete above.  This routine
11487 * completes successfully written allocindirs.
11488 */
11489static void
11490handle_allocindir_partdone(
11491	struct allocindir *aip)		/* the completed allocindir */
11492{
11493	struct indirdep *indirdep;
11494
11495	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
11496		return;
11497	indirdep = aip->ai_indirdep;
11498	LIST_REMOVE(aip, ai_next);
11499	/*
11500	 * Don't set a pointer while the buffer is undergoing IO or while
11501	 * we have active truncations.
11502	 */
11503	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
11504		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
11505		return;
11506	}
11507	if (indirdep->ir_state & UFS1FMT)
11508		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11509		    aip->ai_newblkno;
11510	else
11511		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
11512		    aip->ai_newblkno;
11513	/*
11514	 * Await the pointer write before freeing the allocindir.
11515	 */
11516	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
11517}
11518
11519/*
11520 * Release segments held on a jwork list.
11521 */
11522static void
11523handle_jwork(struct workhead *wkhd)
11524{
11525	struct worklist *wk;
11526
11527	while ((wk = LIST_FIRST(wkhd)) != NULL) {
11528		WORKLIST_REMOVE(wk);
11529		switch (wk->wk_type) {
11530		case D_JSEGDEP:
11531			free_jsegdep(WK_JSEGDEP(wk));
11532			continue;
11533		case D_FREEDEP:
11534			free_freedep(WK_FREEDEP(wk));
11535			continue;
11536		case D_FREEFRAG:
11537			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
11538			WORKITEM_FREE(wk, D_FREEFRAG);
11539			continue;
11540		case D_FREEWORK:
11541			handle_written_freework(WK_FREEWORK(wk));
11542			continue;
11543		default:
11544			panic("handle_jwork: Unknown type %s\n",
11545			    TYPENAME(wk->wk_type));
11546		}
11547	}
11548}
11549
11550/*
11551 * Handle the bufwait list on an inode when it is safe to release items
11552 * held there.  This normally happens after an inode block is written but
11553 * may be delayed and handled later if there are pending journal items that
11554 * are not yet safe to be released.
11555 */
11556static struct freefile *
11557handle_bufwait(
11558	struct inodedep *inodedep,
11559	struct workhead *refhd)
11560{
11561	struct jaddref *jaddref;
11562	struct freefile *freefile;
11563	struct worklist *wk;
11564
11565	freefile = NULL;
11566	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
11567		WORKLIST_REMOVE(wk);
11568		switch (wk->wk_type) {
11569		case D_FREEFILE:
11570			/*
11571			 * We defer adding freefile to the worklist
11572			 * until all other additions have been made to
11573			 * ensure that it will be done after all the
11574			 * old blocks have been freed.
11575			 */
11576			if (freefile != NULL)
11577				panic("handle_bufwait: freefile");
11578			freefile = WK_FREEFILE(wk);
11579			continue;
11580
11581		case D_MKDIR:
11582			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
11583			continue;
11584
11585		case D_DIRADD:
11586			diradd_inode_written(WK_DIRADD(wk), inodedep);
11587			continue;
11588
11589		case D_FREEFRAG:
11590			wk->wk_state |= COMPLETE;
11591			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
11592				add_to_worklist(wk, 0);
11593			continue;
11594
11595		case D_DIRREM:
11596			wk->wk_state |= COMPLETE;
11597			add_to_worklist(wk, 0);
11598			continue;
11599
11600		case D_ALLOCDIRECT:
11601		case D_ALLOCINDIR:
11602			free_newblk(WK_NEWBLK(wk));
11603			continue;
11604
11605		case D_JNEWBLK:
11606			wk->wk_state |= COMPLETE;
11607			free_jnewblk(WK_JNEWBLK(wk));
11608			continue;
11609
11610		/*
11611		 * Save freed journal segments and add references on
11612		 * the supplied list which will delay their release
11613		 * until the cg bitmap is cleared on disk.
11614		 */
11615		case D_JSEGDEP:
11616			if (refhd == NULL)
11617				free_jsegdep(WK_JSEGDEP(wk));
11618			else
11619				WORKLIST_INSERT(refhd, wk);
11620			continue;
11621
11622		case D_JADDREF:
11623			jaddref = WK_JADDREF(wk);
11624			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11625			    if_deps);
11626			/*
11627			 * Transfer any jaddrefs to the list to be freed with
11628			 * the bitmap if we're handling a removed file.
11629			 */
11630			if (refhd == NULL) {
11631				wk->wk_state |= COMPLETE;
11632				free_jaddref(jaddref);
11633			} else
11634				WORKLIST_INSERT(refhd, wk);
11635			continue;
11636
11637		default:
11638			panic("handle_bufwait: Unknown type %p(%s)",
11639			    wk, TYPENAME(wk->wk_type));
11640			/* NOTREACHED */
11641		}
11642	}
11643	return (freefile);
11644}
11645/*
11646 * Called from within softdep_disk_write_complete above to restore
11647 * in-memory inode block contents to their most up-to-date state. Note
11648 * that this routine is always called from interrupt level with further
11649 * interrupts from this device blocked.
11650 *
11651 * If the write did not succeed, we will do all the roll-forward
11652 * operations, but we will not take the actions that will allow its
11653 * dependencies to be processed.
11654 */
11655static int
11656handle_written_inodeblock(
11657	struct inodedep *inodedep,
11658	struct buf *bp,		/* buffer containing the inode block */
11659	int flags)
11660{
11661	struct freefile *freefile;
11662	struct allocdirect *adp, *nextadp;
11663	struct ufs1_dinode *dp1 = NULL;
11664	struct ufs2_dinode *dp2 = NULL;
11665	struct workhead wkhd;
11666	int hadchanges, fstype;
11667	ino_t freelink;
11668
11669	LIST_INIT(&wkhd);
11670	hadchanges = 0;
11671	freefile = NULL;
11672	if ((inodedep->id_state & IOSTARTED) == 0)
11673		panic("handle_written_inodeblock: not started");
11674	inodedep->id_state &= ~IOSTARTED;
11675	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11676		fstype = UFS1;
11677		dp1 = (struct ufs1_dinode *)bp->b_data +
11678		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11679		freelink = dp1->di_freelink;
11680	} else {
11681		fstype = UFS2;
11682		dp2 = (struct ufs2_dinode *)bp->b_data +
11683		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11684		freelink = dp2->di_freelink;
11685	}
11686	/*
11687	 * Leave this inodeblock dirty until it's in the list.
11688	 */
11689	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED &&
11690	    (flags & WRITESUCCEEDED)) {
11691		struct inodedep *inon;
11692
11693		inon = TAILQ_NEXT(inodedep, id_unlinked);
11694		if ((inon == NULL && freelink == 0) ||
11695		    (inon && inon->id_ino == freelink)) {
11696			if (inon)
11697				inon->id_state |= UNLINKPREV;
11698			inodedep->id_state |= UNLINKNEXT;
11699		}
11700		hadchanges = 1;
11701	}
11702	/*
11703	 * If we had to rollback the inode allocation because of
11704	 * bitmaps being incomplete, then simply restore it.
11705	 * Keep the block dirty so that it will not be reclaimed until
11706	 * all associated dependencies have been cleared and the
11707	 * corresponding updates written to disk.
11708	 */
11709	if (inodedep->id_savedino1 != NULL) {
11710		hadchanges = 1;
11711		if (fstype == UFS1)
11712			*dp1 = *inodedep->id_savedino1;
11713		else
11714			*dp2 = *inodedep->id_savedino2;
11715		free(inodedep->id_savedino1, M_SAVEDINO);
11716		inodedep->id_savedino1 = NULL;
11717		if ((bp->b_flags & B_DELWRI) == 0)
11718			stat_inode_bitmap++;
11719		bdirty(bp);
11720		/*
11721		 * If the inode is clear here and GOINGAWAY it will never
11722		 * be written.  Process the bufwait and clear any pending
11723		 * work which may include the freefile.
11724		 */
11725		if (inodedep->id_state & GOINGAWAY)
11726			goto bufwait;
11727		return (1);
11728	}
11729	if (flags & WRITESUCCEEDED)
11730		inodedep->id_state |= COMPLETE;
11731	/*
11732	 * Roll forward anything that had to be rolled back before
11733	 * the inode could be updated.
11734	 */
11735	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11736		nextadp = TAILQ_NEXT(adp, ad_next);
11737		if (adp->ad_state & ATTACHED)
11738			panic("handle_written_inodeblock: new entry");
11739		if (fstype == UFS1) {
11740			if (adp->ad_offset < UFS_NDADDR) {
11741				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11742					panic("%s %s #%jd mismatch %d != %jd",
11743					    "handle_written_inodeblock:",
11744					    "direct pointer",
11745					    (intmax_t)adp->ad_offset,
11746					    dp1->di_db[adp->ad_offset],
11747					    (intmax_t)adp->ad_oldblkno);
11748				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11749			} else {
11750				if (dp1->di_ib[adp->ad_offset - UFS_NDADDR] !=
11751				    0)
11752					panic("%s: %s #%jd allocated as %d",
11753					    "handle_written_inodeblock",
11754					    "indirect pointer",
11755					    (intmax_t)adp->ad_offset -
11756					    UFS_NDADDR,
11757					    dp1->di_ib[adp->ad_offset -
11758					    UFS_NDADDR]);
11759				dp1->di_ib[adp->ad_offset - UFS_NDADDR] =
11760				    adp->ad_newblkno;
11761			}
11762		} else {
11763			if (adp->ad_offset < UFS_NDADDR) {
11764				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11765					panic("%s: %s #%jd %s %jd != %jd",
11766					    "handle_written_inodeblock",
11767					    "direct pointer",
11768					    (intmax_t)adp->ad_offset, "mismatch",
11769					    (intmax_t)dp2->di_db[adp->ad_offset],
11770					    (intmax_t)adp->ad_oldblkno);
11771				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11772			} else {
11773				if (dp2->di_ib[adp->ad_offset - UFS_NDADDR] !=
11774				    0)
11775					panic("%s: %s #%jd allocated as %jd",
11776					    "handle_written_inodeblock",
11777					    "indirect pointer",
11778					    (intmax_t)adp->ad_offset -
11779					    UFS_NDADDR,
11780					    (intmax_t)
11781					    dp2->di_ib[adp->ad_offset -
11782					    UFS_NDADDR]);
11783				dp2->di_ib[adp->ad_offset - UFS_NDADDR] =
11784				    adp->ad_newblkno;
11785			}
11786		}
11787		adp->ad_state &= ~UNDONE;
11788		adp->ad_state |= ATTACHED;
11789		hadchanges = 1;
11790	}
11791	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11792		nextadp = TAILQ_NEXT(adp, ad_next);
11793		if (adp->ad_state & ATTACHED)
11794			panic("handle_written_inodeblock: new entry");
11795		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11796			panic("%s: direct pointers #%jd %s %jd != %jd",
11797			    "handle_written_inodeblock",
11798			    (intmax_t)adp->ad_offset, "mismatch",
11799			    (intmax_t)dp2->di_extb[adp->ad_offset],
11800			    (intmax_t)adp->ad_oldblkno);
11801		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11802		adp->ad_state &= ~UNDONE;
11803		adp->ad_state |= ATTACHED;
11804		hadchanges = 1;
11805	}
11806	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11807		stat_direct_blk_ptrs++;
11808	/*
11809	 * Reset the file size to its most up-to-date value.
11810	 */
11811	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11812		panic("handle_written_inodeblock: bad size");
11813	if (inodedep->id_savednlink > UFS_LINK_MAX)
11814		panic("handle_written_inodeblock: Invalid link count "
11815		    "%jd for inodedep %p", (uintmax_t)inodedep->id_savednlink,
11816		    inodedep);
11817	if (fstype == UFS1) {
11818		if (dp1->di_nlink != inodedep->id_savednlink) {
11819			dp1->di_nlink = inodedep->id_savednlink;
11820			hadchanges = 1;
11821		}
11822		if (dp1->di_size != inodedep->id_savedsize) {
11823			dp1->di_size = inodedep->id_savedsize;
11824			hadchanges = 1;
11825		}
11826	} else {
11827		if (dp2->di_nlink != inodedep->id_savednlink) {
11828			dp2->di_nlink = inodedep->id_savednlink;
11829			hadchanges = 1;
11830		}
11831		if (dp2->di_size != inodedep->id_savedsize) {
11832			dp2->di_size = inodedep->id_savedsize;
11833			hadchanges = 1;
11834		}
11835		if (dp2->di_extsize != inodedep->id_savedextsize) {
11836			dp2->di_extsize = inodedep->id_savedextsize;
11837			hadchanges = 1;
11838		}
11839	}
11840	inodedep->id_savedsize = -1;
11841	inodedep->id_savedextsize = -1;
11842	inodedep->id_savednlink = -1;
11843	/*
11844	 * If there were any rollbacks in the inode block, then it must be
11845	 * marked dirty so that its will eventually get written back in
11846	 * its correct form.
11847	 */
11848	if (hadchanges) {
11849		if (fstype == UFS2)
11850			ffs_update_dinode_ckhash(inodedep->id_fs, dp2);
11851		bdirty(bp);
11852	}
11853bufwait:
11854	/*
11855	 * If the write did not succeed, we have done all the roll-forward
11856	 * operations, but we cannot take the actions that will allow its
11857	 * dependencies to be processed.
11858	 */
11859	if ((flags & WRITESUCCEEDED) == 0)
11860		return (hadchanges);
11861	/*
11862	 * Process any allocdirects that completed during the update.
11863	 */
11864	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11865		handle_allocdirect_partdone(adp, &wkhd);
11866	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11867		handle_allocdirect_partdone(adp, &wkhd);
11868	/*
11869	 * Process deallocations that were held pending until the
11870	 * inode had been written to disk. Freeing of the inode
11871	 * is delayed until after all blocks have been freed to
11872	 * avoid creation of new <vfsid, inum, lbn> triples
11873	 * before the old ones have been deleted.  Completely
11874	 * unlinked inodes are not processed until the unlinked
11875	 * inode list is written or the last reference is removed.
11876	 */
11877	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11878		freefile = handle_bufwait(inodedep, NULL);
11879		if (freefile && !LIST_EMPTY(&wkhd)) {
11880			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11881			freefile = NULL;
11882		}
11883	}
11884	/*
11885	 * Move rolled forward dependency completions to the bufwait list
11886	 * now that those that were already written have been processed.
11887	 */
11888	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11889		panic("handle_written_inodeblock: bufwait but no changes");
11890	jwork_move(&inodedep->id_bufwait, &wkhd);
11891
11892	if (freefile != NULL) {
11893		/*
11894		 * If the inode is goingaway it was never written.  Fake up
11895		 * the state here so free_inodedep() can succeed.
11896		 */
11897		if (inodedep->id_state & GOINGAWAY)
11898			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11899		if (free_inodedep(inodedep) == 0)
11900			panic("handle_written_inodeblock: live inodedep %p",
11901			    inodedep);
11902		add_to_worklist(&freefile->fx_list, 0);
11903		return (0);
11904	}
11905
11906	/*
11907	 * If no outstanding dependencies, free it.
11908	 */
11909	if (free_inodedep(inodedep) ||
11910	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
11911	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
11912	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
11913	     LIST_FIRST(&inodedep->id_bufwait) == 0))
11914		return (0);
11915	return (hadchanges);
11916}
11917
11918/*
11919 * Perform needed roll-forwards and kick off any dependencies that
11920 * can now be processed.
11921 *
11922 * If the write did not succeed, we will do all the roll-forward
11923 * operations, but we will not take the actions that will allow its
11924 * dependencies to be processed.
11925 */
11926static int
11927handle_written_indirdep(
11928	struct indirdep *indirdep,
11929	struct buf *bp,
11930	struct buf **bpp,
11931	int flags)
11932{
11933	struct allocindir *aip;
11934	struct buf *sbp;
11935	int chgs;
11936
11937	if (indirdep->ir_state & GOINGAWAY)
11938		panic("handle_written_indirdep: indirdep gone");
11939	if ((indirdep->ir_state & IOSTARTED) == 0)
11940		panic("handle_written_indirdep: IO not started");
11941	chgs = 0;
11942	/*
11943	 * If there were rollbacks revert them here.
11944	 */
11945	if (indirdep->ir_saveddata) {
11946		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11947		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11948			free(indirdep->ir_saveddata, M_INDIRDEP);
11949			indirdep->ir_saveddata = NULL;
11950		}
11951		chgs = 1;
11952	}
11953	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11954	indirdep->ir_state |= ATTACHED;
11955	/*
11956	 * If the write did not succeed, we have done all the roll-forward
11957	 * operations, but we cannot take the actions that will allow its
11958	 * dependencies to be processed.
11959	 */
11960	if ((flags & WRITESUCCEEDED) == 0) {
11961		stat_indir_blk_ptrs++;
11962		bdirty(bp);
11963		return (1);
11964	}
11965	/*
11966	 * Move allocindirs with written pointers to the completehd if
11967	 * the indirdep's pointer is not yet written.  Otherwise
11968	 * free them here.
11969	 */
11970	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != NULL) {
11971		LIST_REMOVE(aip, ai_next);
11972		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11973			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11974			    ai_next);
11975			newblk_freefrag(&aip->ai_block);
11976			continue;
11977		}
11978		free_newblk(&aip->ai_block);
11979	}
11980	/*
11981	 * Move allocindirs that have finished dependency processing from
11982	 * the done list to the write list after updating the pointers.
11983	 */
11984	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11985		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != NULL) {
11986			handle_allocindir_partdone(aip);
11987			if (aip == LIST_FIRST(&indirdep->ir_donehd))
11988				panic("disk_write_complete: not gone");
11989			chgs = 1;
11990		}
11991	}
11992	/*
11993	 * Preserve the indirdep if there were any changes or if it is not
11994	 * yet valid on disk.
11995	 */
11996	if (chgs) {
11997		stat_indir_blk_ptrs++;
11998		bdirty(bp);
11999		return (1);
12000	}
12001	/*
12002	 * If there were no changes we can discard the savedbp and detach
12003	 * ourselves from the buf.  We are only carrying completed pointers
12004	 * in this case.
12005	 */
12006	sbp = indirdep->ir_savebp;
12007	sbp->b_flags |= B_INVAL | B_NOCACHE;
12008	indirdep->ir_savebp = NULL;
12009	indirdep->ir_bp = NULL;
12010	if (*bpp != NULL)
12011		panic("handle_written_indirdep: bp already exists.");
12012	*bpp = sbp;
12013	/*
12014	 * The indirdep may not be freed until its parent points at it.
12015	 */
12016	if (indirdep->ir_state & DEPCOMPLETE)
12017		free_indirdep(indirdep);
12018
12019	return (0);
12020}
12021
12022/*
12023 * Process a diradd entry after its dependent inode has been written.
12024 */
12025static void
12026diradd_inode_written(
12027	struct diradd *dap,
12028	struct inodedep *inodedep)
12029{
12030
12031	LOCK_OWNED(VFSTOUFS(dap->da_list.wk_mp));
12032	dap->da_state |= COMPLETE;
12033	complete_diradd(dap);
12034	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
12035}
12036
12037/*
12038 * Returns true if the bmsafemap will have rollbacks when written.  Must only
12039 * be called with the per-filesystem lock and the buf lock on the cg held.
12040 */
12041static int
12042bmsafemap_backgroundwrite(
12043	struct bmsafemap *bmsafemap,
12044	struct buf *bp)
12045{
12046	int dirty;
12047
12048	LOCK_OWNED(VFSTOUFS(bmsafemap->sm_list.wk_mp));
12049	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
12050	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
12051	/*
12052	 * If we're initiating a background write we need to process the
12053	 * rollbacks as they exist now, not as they exist when IO starts.
12054	 * No other consumers will look at the contents of the shadowed
12055	 * buf so this is safe to do here.
12056	 */
12057	if (bp->b_xflags & BX_BKGRDMARKER)
12058		initiate_write_bmsafemap(bmsafemap, bp);
12059
12060	return (dirty);
12061}
12062
12063/*
12064 * Re-apply an allocation when a cg write is complete.
12065 */
12066static int
12067jnewblk_rollforward(
12068	struct jnewblk *jnewblk,
12069	struct fs *fs,
12070	struct cg *cgp,
12071	uint8_t *blksfree)
12072{
12073	ufs1_daddr_t fragno;
12074	ufs2_daddr_t blkno;
12075	long cgbno, bbase;
12076	int frags, blk;
12077	int i;
12078
12079	frags = 0;
12080	cgbno = dtogd(fs, jnewblk->jn_blkno);
12081	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
12082		if (isclr(blksfree, cgbno + i))
12083			panic("jnewblk_rollforward: re-allocated fragment");
12084		frags++;
12085	}
12086	if (frags == fs->fs_frag) {
12087		blkno = fragstoblks(fs, cgbno);
12088		ffs_clrblock(fs, blksfree, (long)blkno);
12089		ffs_clusteracct(fs, cgp, blkno, -1);
12090		cgp->cg_cs.cs_nbfree--;
12091	} else {
12092		bbase = cgbno - fragnum(fs, cgbno);
12093		cgbno += jnewblk->jn_oldfrags;
12094                /* If a complete block had been reassembled, account for it. */
12095		fragno = fragstoblks(fs, bbase);
12096		if (ffs_isblock(fs, blksfree, fragno)) {
12097			cgp->cg_cs.cs_nffree += fs->fs_frag;
12098			ffs_clusteracct(fs, cgp, fragno, -1);
12099			cgp->cg_cs.cs_nbfree--;
12100		}
12101		/* Decrement the old frags.  */
12102		blk = blkmap(fs, blksfree, bbase);
12103		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
12104		/* Allocate the fragment */
12105		for (i = 0; i < frags; i++)
12106			clrbit(blksfree, cgbno + i);
12107		cgp->cg_cs.cs_nffree -= frags;
12108		/* Add back in counts associated with the new frags */
12109		blk = blkmap(fs, blksfree, bbase);
12110		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
12111	}
12112	return (frags);
12113}
12114
12115/*
12116 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
12117 * changes if it's not a background write.  Set all written dependencies
12118 * to DEPCOMPLETE and free the structure if possible.
12119 *
12120 * If the write did not succeed, we will do all the roll-forward
12121 * operations, but we will not take the actions that will allow its
12122 * dependencies to be processed.
12123 */
12124static int
12125handle_written_bmsafemap(
12126	struct bmsafemap *bmsafemap,
12127	struct buf *bp,
12128	int flags)
12129{
12130	struct newblk *newblk;
12131	struct inodedep *inodedep;
12132	struct jaddref *jaddref, *jatmp;
12133	struct jnewblk *jnewblk, *jntmp;
12134	struct ufsmount *ump;
12135	uint8_t *inosused;
12136	uint8_t *blksfree;
12137	struct cg *cgp;
12138	struct fs *fs;
12139	ino_t ino;
12140	int foreground;
12141	int chgs;
12142
12143	if ((bmsafemap->sm_state & IOSTARTED) == 0)
12144		panic("handle_written_bmsafemap: Not started\n");
12145	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
12146	chgs = 0;
12147	bmsafemap->sm_state &= ~IOSTARTED;
12148	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
12149	/*
12150	 * If write was successful, release journal work that was waiting
12151	 * on the write. Otherwise move the work back.
12152	 */
12153	if (flags & WRITESUCCEEDED)
12154		handle_jwork(&bmsafemap->sm_freewr);
12155	else
12156		LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr,
12157		    worklist, wk_list);
12158
12159	/*
12160	 * Restore unwritten inode allocation pending jaddref writes.
12161	 */
12162	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
12163		cgp = (struct cg *)bp->b_data;
12164		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
12165		inosused = cg_inosused(cgp);
12166		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
12167		    ja_bmdeps, jatmp) {
12168			if ((jaddref->ja_state & UNDONE) == 0)
12169				continue;
12170			ino = jaddref->ja_ino % fs->fs_ipg;
12171			if (isset(inosused, ino))
12172				panic("handle_written_bmsafemap: "
12173				    "re-allocated inode");
12174			/* Do the roll-forward only if it's a real copy. */
12175			if (foreground) {
12176				if ((jaddref->ja_mode & IFMT) == IFDIR)
12177					cgp->cg_cs.cs_ndir++;
12178				cgp->cg_cs.cs_nifree--;
12179				setbit(inosused, ino);
12180				chgs = 1;
12181			}
12182			jaddref->ja_state &= ~UNDONE;
12183			jaddref->ja_state |= ATTACHED;
12184			free_jaddref(jaddref);
12185		}
12186	}
12187	/*
12188	 * Restore any block allocations which are pending journal writes.
12189	 */
12190	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
12191		cgp = (struct cg *)bp->b_data;
12192		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
12193		blksfree = cg_blksfree(cgp);
12194		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
12195		    jntmp) {
12196			if ((jnewblk->jn_state & UNDONE) == 0)
12197				continue;
12198			/* Do the roll-forward only if it's a real copy. */
12199			if (foreground &&
12200			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
12201				chgs = 1;
12202			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
12203			jnewblk->jn_state |= ATTACHED;
12204			free_jnewblk(jnewblk);
12205		}
12206	}
12207	/*
12208	 * If the write did not succeed, we have done all the roll-forward
12209	 * operations, but we cannot take the actions that will allow its
12210	 * dependencies to be processed.
12211	 */
12212	if ((flags & WRITESUCCEEDED) == 0) {
12213		LIST_CONCAT(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
12214		    newblk, nb_deps);
12215		LIST_CONCAT(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr,
12216		    worklist, wk_list);
12217		if (foreground)
12218			bdirty(bp);
12219		return (1);
12220	}
12221	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
12222		newblk->nb_state |= DEPCOMPLETE;
12223		newblk->nb_state &= ~ONDEPLIST;
12224		newblk->nb_bmsafemap = NULL;
12225		LIST_REMOVE(newblk, nb_deps);
12226		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
12227			handle_allocdirect_partdone(
12228			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
12229		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
12230			handle_allocindir_partdone(
12231			    WK_ALLOCINDIR(&newblk->nb_list));
12232		else if (newblk->nb_list.wk_type != D_NEWBLK)
12233			panic("handle_written_bmsafemap: Unexpected type: %s",
12234			    TYPENAME(newblk->nb_list.wk_type));
12235	}
12236	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
12237		inodedep->id_state |= DEPCOMPLETE;
12238		inodedep->id_state &= ~ONDEPLIST;
12239		LIST_REMOVE(inodedep, id_deps);
12240		inodedep->id_bmsafemap = NULL;
12241	}
12242	LIST_REMOVE(bmsafemap, sm_next);
12243	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
12244	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
12245	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
12246	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
12247	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
12248		LIST_REMOVE(bmsafemap, sm_hash);
12249		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
12250		return (0);
12251	}
12252	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
12253	if (foreground)
12254		bdirty(bp);
12255	return (1);
12256}
12257
12258/*
12259 * Try to free a mkdir dependency.
12260 */
12261static void
12262complete_mkdir(struct mkdir *mkdir)
12263{
12264	struct diradd *dap;
12265
12266	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
12267		return;
12268	LIST_REMOVE(mkdir, md_mkdirs);
12269	dap = mkdir->md_diradd;
12270	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
12271	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
12272		dap->da_state |= DEPCOMPLETE;
12273		complete_diradd(dap);
12274	}
12275	WORKITEM_FREE(mkdir, D_MKDIR);
12276}
12277
12278/*
12279 * Handle the completion of a mkdir dependency.
12280 */
12281static void
12282handle_written_mkdir(struct mkdir *mkdir, int type)
12283{
12284
12285	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
12286		panic("handle_written_mkdir: bad type");
12287	mkdir->md_state |= COMPLETE;
12288	complete_mkdir(mkdir);
12289}
12290
12291static int
12292free_pagedep(struct pagedep *pagedep)
12293{
12294	int i;
12295
12296	if (pagedep->pd_state & NEWBLOCK)
12297		return (0);
12298	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
12299		return (0);
12300	for (i = 0; i < DAHASHSZ; i++)
12301		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
12302			return (0);
12303	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
12304		return (0);
12305	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
12306		return (0);
12307	if (pagedep->pd_state & ONWORKLIST)
12308		WORKLIST_REMOVE(&pagedep->pd_list);
12309	LIST_REMOVE(pagedep, pd_hash);
12310	WORKITEM_FREE(pagedep, D_PAGEDEP);
12311
12312	return (1);
12313}
12314
12315/*
12316 * Called from within softdep_disk_write_complete above.
12317 * A write operation was just completed. Removed inodes can
12318 * now be freed and associated block pointers may be committed.
12319 * Note that this routine is always called from interrupt level
12320 * with further interrupts from this device blocked.
12321 *
12322 * If the write did not succeed, we will do all the roll-forward
12323 * operations, but we will not take the actions that will allow its
12324 * dependencies to be processed.
12325 */
12326static int
12327handle_written_filepage(
12328	struct pagedep *pagedep,
12329	struct buf *bp,		/* buffer containing the written page */
12330	int flags)
12331{
12332	struct dirrem *dirrem;
12333	struct diradd *dap, *nextdap;
12334	struct direct *ep;
12335	int i, chgs;
12336
12337	if ((pagedep->pd_state & IOSTARTED) == 0)
12338		panic("handle_written_filepage: not started");
12339	pagedep->pd_state &= ~IOSTARTED;
12340	if ((flags & WRITESUCCEEDED) == 0)
12341		goto rollforward;
12342	/*
12343	 * Process any directory removals that have been committed.
12344	 */
12345	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
12346		LIST_REMOVE(dirrem, dm_next);
12347		dirrem->dm_state |= COMPLETE;
12348		dirrem->dm_dirinum = pagedep->pd_ino;
12349		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
12350		    ("handle_written_filepage: Journal entries not written."));
12351		add_to_worklist(&dirrem->dm_list, 0);
12352	}
12353	/*
12354	 * Free any directory additions that have been committed.
12355	 * If it is a newly allocated block, we have to wait until
12356	 * the on-disk directory inode claims the new block.
12357	 */
12358	if ((pagedep->pd_state & NEWBLOCK) == 0)
12359		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
12360			free_diradd(dap, NULL);
12361rollforward:
12362	/*
12363	 * Uncommitted directory entries must be restored.
12364	 */
12365	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
12366		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
12367		     dap = nextdap) {
12368			nextdap = LIST_NEXT(dap, da_pdlist);
12369			if (dap->da_state & ATTACHED)
12370				panic("handle_written_filepage: attached");
12371			ep = (struct direct *)
12372			    ((char *)bp->b_data + dap->da_offset);
12373			ep->d_ino = dap->da_newinum;
12374			dap->da_state &= ~UNDONE;
12375			dap->da_state |= ATTACHED;
12376			chgs = 1;
12377			/*
12378			 * If the inode referenced by the directory has
12379			 * been written out, then the dependency can be
12380			 * moved to the pending list.
12381			 */
12382			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
12383				LIST_REMOVE(dap, da_pdlist);
12384				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
12385				    da_pdlist);
12386			}
12387		}
12388	}
12389	/*
12390	 * If there were any rollbacks in the directory, then it must be
12391	 * marked dirty so that its will eventually get written back in
12392	 * its correct form.
12393	 */
12394	if (chgs || (flags & WRITESUCCEEDED) == 0) {
12395		if ((bp->b_flags & B_DELWRI) == 0)
12396			stat_dir_entry++;
12397		bdirty(bp);
12398		return (1);
12399	}
12400	/*
12401	 * If we are not waiting for a new directory block to be
12402	 * claimed by its inode, then the pagedep will be freed.
12403	 * Otherwise it will remain to track any new entries on
12404	 * the page in case they are fsync'ed.
12405	 */
12406	free_pagedep(pagedep);
12407	return (0);
12408}
12409
12410/*
12411 * Writing back in-core inode structures.
12412 *
12413 * The filesystem only accesses an inode's contents when it occupies an
12414 * "in-core" inode structure.  These "in-core" structures are separate from
12415 * the page frames used to cache inode blocks.  Only the latter are
12416 * transferred to/from the disk.  So, when the updated contents of the
12417 * "in-core" inode structure are copied to the corresponding in-memory inode
12418 * block, the dependencies are also transferred.  The following procedure is
12419 * called when copying a dirty "in-core" inode to a cached inode block.
12420 */
12421
12422/*
12423 * Called when an inode is loaded from disk. If the effective link count
12424 * differed from the actual link count when it was last flushed, then we
12425 * need to ensure that the correct effective link count is put back.
12426 */
12427void
12428softdep_load_inodeblock(
12429	struct inode *ip)	/* the "in_core" copy of the inode */
12430{
12431	struct inodedep *inodedep;
12432	struct ufsmount *ump;
12433
12434	ump = ITOUMP(ip);
12435	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
12436	    ("softdep_load_inodeblock called on non-softdep filesystem"));
12437	/*
12438	 * Check for alternate nlink count.
12439	 */
12440	ip->i_effnlink = ip->i_nlink;
12441	ACQUIRE_LOCK(ump);
12442	if (inodedep_lookup(UFSTOVFS(ump), ip->i_number, 0, &inodedep) == 0) {
12443		FREE_LOCK(ump);
12444		return;
12445	}
12446	if (ip->i_nlink != inodedep->id_nlinkwrote &&
12447	    inodedep->id_nlinkwrote != -1) {
12448		KASSERT(ip->i_nlink == 0 &&
12449		    (ump->um_flags & UM_FSFAIL_CLEANUP) != 0,
12450		    ("read bad i_nlink value"));
12451		ip->i_effnlink = ip->i_nlink = inodedep->id_nlinkwrote;
12452	}
12453	ip->i_effnlink -= inodedep->id_nlinkdelta;
12454	KASSERT(ip->i_effnlink >= 0,
12455	    ("softdep_load_inodeblock: negative i_effnlink"));
12456	FREE_LOCK(ump);
12457}
12458
12459/*
12460 * This routine is called just before the "in-core" inode
12461 * information is to be copied to the in-memory inode block.
12462 * Recall that an inode block contains several inodes. If
12463 * the force flag is set, then the dependencies will be
12464 * cleared so that the update can always be made. Note that
12465 * the buffer is locked when this routine is called, so we
12466 * will never be in the middle of writing the inode block
12467 * to disk.
12468 */
12469void
12470softdep_update_inodeblock(
12471	struct inode *ip,	/* the "in_core" copy of the inode */
12472	struct buf *bp,		/* the buffer containing the inode block */
12473	int waitfor)		/* nonzero => update must be allowed */
12474{
12475	struct inodedep *inodedep;
12476	struct inoref *inoref;
12477	struct ufsmount *ump;
12478	struct worklist *wk;
12479	struct mount *mp;
12480	struct buf *ibp;
12481	struct fs *fs;
12482	int error;
12483
12484	ump = ITOUMP(ip);
12485	mp = UFSTOVFS(ump);
12486	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
12487	    ("softdep_update_inodeblock called on non-softdep filesystem"));
12488	fs = ump->um_fs;
12489	/*
12490	 * If the effective link count is not equal to the actual link
12491	 * count, then we must track the difference in an inodedep while
12492	 * the inode is (potentially) tossed out of the cache. Otherwise,
12493	 * if there is no existing inodedep, then there are no dependencies
12494	 * to track.
12495	 */
12496	ACQUIRE_LOCK(ump);
12497again:
12498	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12499		FREE_LOCK(ump);
12500		if (ip->i_effnlink != ip->i_nlink)
12501			panic("softdep_update_inodeblock: bad link count");
12502		return;
12503	}
12504	/*
12505	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
12506	 * does not have access to the in-core ip so must write directly into
12507	 * the inode block buffer when setting freelink.
12508	 */
12509	if ((inodedep->id_state & UNLINKED) != 0) {
12510		if (fs->fs_magic == FS_UFS1_MAGIC)
12511			DIP_SET(ip, i_freelink,
12512			    ((struct ufs1_dinode *)bp->b_data +
12513			    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12514		else
12515			DIP_SET(ip, i_freelink,
12516			    ((struct ufs2_dinode *)bp->b_data +
12517			    ino_to_fsbo(fs, ip->i_number))->di_freelink);
12518	}
12519	KASSERT(ip->i_nlink >= inodedep->id_nlinkdelta,
12520	    ("softdep_update_inodeblock inconsistent ip %p i_nlink %d "
12521	    "inodedep %p id_nlinkdelta %jd",
12522	    ip, ip->i_nlink, inodedep, (intmax_t)inodedep->id_nlinkdelta));
12523	inodedep->id_nlinkwrote = ip->i_nlink;
12524	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
12525		panic("softdep_update_inodeblock: bad delta");
12526	/*
12527	 * If we're flushing all dependencies we must also move any waiting
12528	 * for journal writes onto the bufwait list prior to I/O.
12529	 */
12530	if (waitfor) {
12531		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12532			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12533			    == DEPCOMPLETE) {
12534				jwait(&inoref->if_list, MNT_WAIT);
12535				goto again;
12536			}
12537		}
12538	}
12539	/*
12540	 * Changes have been initiated. Anything depending on these
12541	 * changes cannot occur until this inode has been written.
12542	 */
12543	inodedep->id_state &= ~COMPLETE;
12544	if ((inodedep->id_state & ONWORKLIST) == 0)
12545		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
12546	/*
12547	 * Any new dependencies associated with the incore inode must
12548	 * now be moved to the list associated with the buffer holding
12549	 * the in-memory copy of the inode. Once merged process any
12550	 * allocdirects that are completed by the merger.
12551	 */
12552	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
12553	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
12554		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
12555		    NULL);
12556	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
12557	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
12558		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
12559		    NULL);
12560	/*
12561	 * Now that the inode has been pushed into the buffer, the
12562	 * operations dependent on the inode being written to disk
12563	 * can be moved to the id_bufwait so that they will be
12564	 * processed when the buffer I/O completes.
12565	 */
12566	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
12567		WORKLIST_REMOVE(wk);
12568		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
12569	}
12570	/*
12571	 * Newly allocated inodes cannot be written until the bitmap
12572	 * that allocates them have been written (indicated by
12573	 * DEPCOMPLETE being set in id_state). If we are doing a
12574	 * forced sync (e.g., an fsync on a file), we force the bitmap
12575	 * to be written so that the update can be done.
12576	 */
12577	if (waitfor == 0) {
12578		FREE_LOCK(ump);
12579		return;
12580	}
12581retry:
12582	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
12583		FREE_LOCK(ump);
12584		return;
12585	}
12586	ibp = inodedep->id_bmsafemap->sm_buf;
12587	ibp = getdirtybuf(ibp, LOCK_PTR(ump), MNT_WAIT);
12588	if (ibp == NULL) {
12589		/*
12590		 * If ibp came back as NULL, the dependency could have been
12591		 * freed while we slept.  Look it up again, and check to see
12592		 * that it has completed.
12593		 */
12594		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
12595			goto retry;
12596		FREE_LOCK(ump);
12597		return;
12598	}
12599	FREE_LOCK(ump);
12600	if ((error = bwrite(ibp)) != 0)
12601		softdep_error("softdep_update_inodeblock: bwrite", error);
12602}
12603
12604/*
12605 * Merge the a new inode dependency list (such as id_newinoupdt) into an
12606 * old inode dependency list (such as id_inoupdt).
12607 */
12608static void
12609merge_inode_lists(
12610	struct allocdirectlst *newlisthead,
12611	struct allocdirectlst *oldlisthead)
12612{
12613	struct allocdirect *listadp, *newadp;
12614
12615	newadp = TAILQ_FIRST(newlisthead);
12616	if (newadp != NULL)
12617		LOCK_OWNED(VFSTOUFS(newadp->ad_block.nb_list.wk_mp));
12618	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
12619		if (listadp->ad_offset < newadp->ad_offset) {
12620			listadp = TAILQ_NEXT(listadp, ad_next);
12621			continue;
12622		}
12623		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12624		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
12625		if (listadp->ad_offset == newadp->ad_offset) {
12626			allocdirect_merge(oldlisthead, newadp,
12627			    listadp);
12628			listadp = newadp;
12629		}
12630		newadp = TAILQ_FIRST(newlisthead);
12631	}
12632	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
12633		TAILQ_REMOVE(newlisthead, newadp, ad_next);
12634		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
12635	}
12636}
12637
12638/*
12639 * If we are doing an fsync, then we must ensure that any directory
12640 * entries for the inode have been written after the inode gets to disk.
12641 */
12642int
12643softdep_fsync(
12644	struct vnode *vp)	/* the "in_core" copy of the inode */
12645{
12646	struct inodedep *inodedep;
12647	struct pagedep *pagedep;
12648	struct inoref *inoref;
12649	struct ufsmount *ump;
12650	struct worklist *wk;
12651	struct diradd *dap;
12652	struct mount *mp;
12653	struct vnode *pvp;
12654	struct inode *ip;
12655	struct buf *bp;
12656	struct fs *fs;
12657	struct thread *td = curthread;
12658	int error, flushparent, pagedep_new_block;
12659	ino_t parentino;
12660	ufs_lbn_t lbn;
12661
12662	ip = VTOI(vp);
12663	mp = vp->v_mount;
12664	ump = VFSTOUFS(mp);
12665	fs = ump->um_fs;
12666	if (MOUNTEDSOFTDEP(mp) == 0)
12667		return (0);
12668	ACQUIRE_LOCK(ump);
12669restart:
12670	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12671		FREE_LOCK(ump);
12672		return (0);
12673	}
12674	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12675		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12676		    == DEPCOMPLETE) {
12677			jwait(&inoref->if_list, MNT_WAIT);
12678			goto restart;
12679		}
12680	}
12681	if (!LIST_EMPTY(&inodedep->id_inowait) ||
12682	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12683	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12684	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12685	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12686		panic("softdep_fsync: pending ops %p", inodedep);
12687	for (error = 0, flushparent = 0; ; ) {
12688		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12689			break;
12690		if (wk->wk_type != D_DIRADD)
12691			panic("softdep_fsync: Unexpected type %s",
12692			    TYPENAME(wk->wk_type));
12693		dap = WK_DIRADD(wk);
12694		/*
12695		 * Flush our parent if this directory entry has a MKDIR_PARENT
12696		 * dependency or is contained in a newly allocated block.
12697		 */
12698		if (dap->da_state & DIRCHG)
12699			pagedep = dap->da_previous->dm_pagedep;
12700		else
12701			pagedep = dap->da_pagedep;
12702		parentino = pagedep->pd_ino;
12703		lbn = pagedep->pd_lbn;
12704		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12705			panic("softdep_fsync: dirty");
12706		if ((dap->da_state & MKDIR_PARENT) ||
12707		    (pagedep->pd_state & NEWBLOCK))
12708			flushparent = 1;
12709		else
12710			flushparent = 0;
12711		/*
12712		 * If we are being fsync'ed as part of vgone'ing this vnode,
12713		 * then we will not be able to release and recover the
12714		 * vnode below, so we just have to give up on writing its
12715		 * directory entry out. It will eventually be written, just
12716		 * not now, but then the user was not asking to have it
12717		 * written, so we are not breaking any promises.
12718		 */
12719		if (VN_IS_DOOMED(vp))
12720			break;
12721		/*
12722		 * We prevent deadlock by always fetching inodes from the
12723		 * root, moving down the directory tree. Thus, when fetching
12724		 * our parent directory, we first try to get the lock. If
12725		 * that fails, we must unlock ourselves before requesting
12726		 * the lock on our parent. See the comment in ufs_lookup
12727		 * for details on possible races.
12728		 */
12729		FREE_LOCK(ump);
12730		error = get_parent_vp(vp, mp, parentino, NULL, NULL, NULL,
12731		    &pvp);
12732		if (error == ERELOOKUP)
12733			error = 0;
12734		if (error != 0)
12735			return (error);
12736		/*
12737		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12738		 * that are contained in direct blocks will be resolved by
12739		 * doing a ffs_update. Pagedeps contained in indirect blocks
12740		 * may require a complete sync'ing of the directory. So, we
12741		 * try the cheap and fast ffs_update first, and if that fails,
12742		 * then we do the slower ffs_syncvnode of the directory.
12743		 */
12744		if (flushparent) {
12745			int locked;
12746
12747			if ((error = ffs_update(pvp, 1)) != 0) {
12748				vput(pvp);
12749				return (error);
12750			}
12751			ACQUIRE_LOCK(ump);
12752			locked = 1;
12753			if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12754				if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12755					if (wk->wk_type != D_DIRADD)
12756						panic("softdep_fsync: Unexpected type %s",
12757						      TYPENAME(wk->wk_type));
12758					dap = WK_DIRADD(wk);
12759					if (dap->da_state & DIRCHG)
12760						pagedep = dap->da_previous->dm_pagedep;
12761					else
12762						pagedep = dap->da_pagedep;
12763					pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12764					FREE_LOCK(ump);
12765					locked = 0;
12766					if (pagedep_new_block) {
12767						VOP_UNLOCK(vp);
12768						error = ffs_syncvnode(pvp,
12769						    MNT_WAIT, 0);
12770						if (error == 0)
12771							error = ERELOOKUP;
12772						vput(pvp);
12773						vn_lock(vp, LK_EXCLUSIVE |
12774						    LK_RETRY);
12775						return (error);
12776					}
12777				}
12778			}
12779			if (locked)
12780				FREE_LOCK(ump);
12781		}
12782		/*
12783		 * Flush directory page containing the inode's name.
12784		 */
12785		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12786		    &bp);
12787		if (error == 0)
12788			error = bwrite(bp);
12789		else
12790			brelse(bp);
12791		vput(pvp);
12792		if (!ffs_fsfail_cleanup(ump, error))
12793			return (error);
12794		ACQUIRE_LOCK(ump);
12795		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12796			break;
12797	}
12798	FREE_LOCK(ump);
12799	return (0);
12800}
12801
12802/*
12803 * Flush all the dirty bitmaps associated with the block device
12804 * before flushing the rest of the dirty blocks so as to reduce
12805 * the number of dependencies that will have to be rolled back.
12806 *
12807 * XXX Unused?
12808 */
12809void
12810softdep_fsync_mountdev(struct vnode *vp)
12811{
12812	struct buf *bp, *nbp;
12813	struct worklist *wk;
12814	struct bufobj *bo;
12815
12816	if (!vn_isdisk(vp))
12817		panic("softdep_fsync_mountdev: vnode not a disk");
12818	bo = &vp->v_bufobj;
12819restart:
12820	BO_LOCK(bo);
12821	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12822		/*
12823		 * If it is already scheduled, skip to the next buffer.
12824		 */
12825		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12826			continue;
12827
12828		if ((bp->b_flags & B_DELWRI) == 0)
12829			panic("softdep_fsync_mountdev: not dirty");
12830		/*
12831		 * We are only interested in bitmaps with outstanding
12832		 * dependencies.
12833		 */
12834		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12835		    wk->wk_type != D_BMSAFEMAP ||
12836		    (bp->b_vflags & BV_BKGRDINPROG)) {
12837			BUF_UNLOCK(bp);
12838			continue;
12839		}
12840		BO_UNLOCK(bo);
12841		bremfree(bp);
12842		(void) bawrite(bp);
12843		goto restart;
12844	}
12845	drain_output(vp);
12846	BO_UNLOCK(bo);
12847}
12848
12849/*
12850 * Sync all cylinder groups that were dirty at the time this function is
12851 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
12852 * is used to flush freedep activity that may be holding up writes to a
12853 * indirect block.
12854 */
12855static int
12856sync_cgs(struct mount *mp, int waitfor)
12857{
12858	struct bmsafemap *bmsafemap;
12859	struct bmsafemap *sentinel;
12860	struct ufsmount *ump;
12861	struct buf *bp;
12862	int error;
12863
12864	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12865	sentinel->sm_cg = -1;
12866	ump = VFSTOUFS(mp);
12867	error = 0;
12868	ACQUIRE_LOCK(ump);
12869	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12870	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12871	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12872		/* Skip sentinels and cgs with no work to release. */
12873		if (bmsafemap->sm_cg == -1 ||
12874		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12875		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
12876			LIST_REMOVE(sentinel, sm_next);
12877			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12878			continue;
12879		}
12880		/*
12881		 * If we don't get the lock and we're waiting try again, if
12882		 * not move on to the next buf and try to sync it.
12883		 */
12884		bp = getdirtybuf(bmsafemap->sm_buf, LOCK_PTR(ump), waitfor);
12885		if (bp == NULL && waitfor == MNT_WAIT)
12886			continue;
12887		LIST_REMOVE(sentinel, sm_next);
12888		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12889		if (bp == NULL)
12890			continue;
12891		FREE_LOCK(ump);
12892		if (waitfor == MNT_NOWAIT)
12893			bawrite(bp);
12894		else
12895			error = bwrite(bp);
12896		ACQUIRE_LOCK(ump);
12897		if (error)
12898			break;
12899	}
12900	LIST_REMOVE(sentinel, sm_next);
12901	FREE_LOCK(ump);
12902	free(sentinel, M_BMSAFEMAP);
12903	return (error);
12904}
12905
12906/*
12907 * This routine is called when we are trying to synchronously flush a
12908 * file. This routine must eliminate any filesystem metadata dependencies
12909 * so that the syncing routine can succeed.
12910 */
12911int
12912softdep_sync_metadata(struct vnode *vp)
12913{
12914	struct inode *ip;
12915	int error;
12916
12917	ip = VTOI(vp);
12918	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12919	    ("softdep_sync_metadata called on non-softdep filesystem"));
12920	/*
12921	 * Ensure that any direct block dependencies have been cleared,
12922	 * truncations are started, and inode references are journaled.
12923	 */
12924	ACQUIRE_LOCK(VFSTOUFS(vp->v_mount));
12925	/*
12926	 * Write all journal records to prevent rollbacks on devvp.
12927	 */
12928	if (vp->v_type == VCHR)
12929		softdep_flushjournal(vp->v_mount);
12930	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
12931	/*
12932	 * Ensure that all truncates are written so we won't find deps on
12933	 * indirect blocks.
12934	 */
12935	process_truncates(vp);
12936	FREE_LOCK(VFSTOUFS(vp->v_mount));
12937
12938	return (error);
12939}
12940
12941/*
12942 * This routine is called when we are attempting to sync a buf with
12943 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
12944 * other IO it can but returns EBUSY if the buffer is not yet able to
12945 * be written.  Dependencies which will not cause rollbacks will always
12946 * return 0.
12947 */
12948int
12949softdep_sync_buf(struct vnode *vp,
12950	struct buf *bp,
12951	int waitfor)
12952{
12953	struct indirdep *indirdep;
12954	struct pagedep *pagedep;
12955	struct allocindir *aip;
12956	struct newblk *newblk;
12957	struct ufsmount *ump;
12958	struct buf *nbp;
12959	struct worklist *wk;
12960	int i, error;
12961
12962	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12963	    ("softdep_sync_buf called on non-softdep filesystem"));
12964	/*
12965	 * For VCHR we just don't want to force flush any dependencies that
12966	 * will cause rollbacks.
12967	 */
12968	if (vp->v_type == VCHR) {
12969		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12970			return (EBUSY);
12971		return (0);
12972	}
12973	ump = VFSTOUFS(vp->v_mount);
12974	ACQUIRE_LOCK(ump);
12975	/*
12976	 * As we hold the buffer locked, none of its dependencies
12977	 * will disappear.
12978	 */
12979	error = 0;
12980top:
12981	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12982		switch (wk->wk_type) {
12983		case D_ALLOCDIRECT:
12984		case D_ALLOCINDIR:
12985			newblk = WK_NEWBLK(wk);
12986			if (newblk->nb_jnewblk != NULL) {
12987				if (waitfor == MNT_NOWAIT) {
12988					error = EBUSY;
12989					goto out_unlock;
12990				}
12991				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12992				goto top;
12993			}
12994			if (newblk->nb_state & DEPCOMPLETE ||
12995			    waitfor == MNT_NOWAIT)
12996				continue;
12997			nbp = newblk->nb_bmsafemap->sm_buf;
12998			nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
12999			if (nbp == NULL)
13000				goto top;
13001			FREE_LOCK(ump);
13002			if ((error = bwrite(nbp)) != 0)
13003				goto out;
13004			ACQUIRE_LOCK(ump);
13005			continue;
13006
13007		case D_INDIRDEP:
13008			indirdep = WK_INDIRDEP(wk);
13009			if (waitfor == MNT_NOWAIT) {
13010				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
13011				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
13012					error = EBUSY;
13013					goto out_unlock;
13014				}
13015			}
13016			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
13017				panic("softdep_sync_buf: truncation pending.");
13018		restart:
13019			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13020				newblk = (struct newblk *)aip;
13021				if (newblk->nb_jnewblk != NULL) {
13022					jwait(&newblk->nb_jnewblk->jn_list,
13023					    waitfor);
13024					goto restart;
13025				}
13026				if (newblk->nb_state & DEPCOMPLETE)
13027					continue;
13028				nbp = newblk->nb_bmsafemap->sm_buf;
13029				nbp = getdirtybuf(nbp, LOCK_PTR(ump), waitfor);
13030				if (nbp == NULL)
13031					goto restart;
13032				FREE_LOCK(ump);
13033				if ((error = bwrite(nbp)) != 0)
13034					goto out;
13035				ACQUIRE_LOCK(ump);
13036				goto restart;
13037			}
13038			continue;
13039
13040		case D_PAGEDEP:
13041			/*
13042			 * Only flush directory entries in synchronous passes.
13043			 */
13044			if (waitfor != MNT_WAIT) {
13045				error = EBUSY;
13046				goto out_unlock;
13047			}
13048			/*
13049			 * While syncing snapshots, we must allow recursive
13050			 * lookups.
13051			 */
13052			BUF_AREC(bp);
13053			/*
13054			 * We are trying to sync a directory that may
13055			 * have dependencies on both its own metadata
13056			 * and/or dependencies on the inodes of any
13057			 * recently allocated files. We walk its diradd
13058			 * lists pushing out the associated inode.
13059			 */
13060			pagedep = WK_PAGEDEP(wk);
13061			for (i = 0; i < DAHASHSZ; i++) {
13062				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
13063					continue;
13064				error = flush_pagedep_deps(vp, wk->wk_mp,
13065				    &pagedep->pd_diraddhd[i], bp);
13066				if (error != 0) {
13067					if (error != ERELOOKUP)
13068						BUF_NOREC(bp);
13069					goto out_unlock;
13070				}
13071			}
13072			BUF_NOREC(bp);
13073			continue;
13074
13075		case D_FREEWORK:
13076		case D_FREEDEP:
13077		case D_JSEGDEP:
13078		case D_JNEWBLK:
13079			continue;
13080
13081		default:
13082			panic("softdep_sync_buf: Unknown type %s",
13083			    TYPENAME(wk->wk_type));
13084			/* NOTREACHED */
13085		}
13086	}
13087out_unlock:
13088	FREE_LOCK(ump);
13089out:
13090	return (error);
13091}
13092
13093/*
13094 * Flush the dependencies associated with an inodedep.
13095 */
13096static int
13097flush_inodedep_deps(
13098	struct vnode *vp,
13099	struct mount *mp,
13100	ino_t ino)
13101{
13102	struct inodedep *inodedep;
13103	struct inoref *inoref;
13104	struct ufsmount *ump;
13105	int error, waitfor;
13106
13107	/*
13108	 * This work is done in two passes. The first pass grabs most
13109	 * of the buffers and begins asynchronously writing them. The
13110	 * only way to wait for these asynchronous writes is to sleep
13111	 * on the filesystem vnode which may stay busy for a long time
13112	 * if the filesystem is active. So, instead, we make a second
13113	 * pass over the dependencies blocking on each write. In the
13114	 * usual case we will be blocking against a write that we
13115	 * initiated, so when it is done the dependency will have been
13116	 * resolved. Thus the second pass is expected to end quickly.
13117	 * We give a brief window at the top of the loop to allow
13118	 * any pending I/O to complete.
13119	 */
13120	ump = VFSTOUFS(mp);
13121	LOCK_OWNED(ump);
13122	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
13123		if (error)
13124			return (error);
13125		FREE_LOCK(ump);
13126		ACQUIRE_LOCK(ump);
13127restart:
13128		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13129			return (0);
13130		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
13131			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
13132			    == DEPCOMPLETE) {
13133				jwait(&inoref->if_list, MNT_WAIT);
13134				goto restart;
13135			}
13136		}
13137		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
13138		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
13139		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
13140		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
13141			continue;
13142		/*
13143		 * If pass2, we are done, otherwise do pass 2.
13144		 */
13145		if (waitfor == MNT_WAIT)
13146			break;
13147		waitfor = MNT_WAIT;
13148	}
13149	/*
13150	 * Try freeing inodedep in case all dependencies have been removed.
13151	 */
13152	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
13153		(void) free_inodedep(inodedep);
13154	return (0);
13155}
13156
13157/*
13158 * Flush an inode dependency list.
13159 */
13160static int
13161flush_deplist(
13162	struct allocdirectlst *listhead,
13163	int waitfor,
13164	int *errorp)
13165{
13166	struct allocdirect *adp;
13167	struct newblk *newblk;
13168	struct ufsmount *ump;
13169	struct buf *bp;
13170
13171	if ((adp = TAILQ_FIRST(listhead)) == NULL)
13172		return (0);
13173	ump = VFSTOUFS(adp->ad_list.wk_mp);
13174	LOCK_OWNED(ump);
13175	TAILQ_FOREACH(adp, listhead, ad_next) {
13176		newblk = (struct newblk *)adp;
13177		if (newblk->nb_jnewblk != NULL) {
13178			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
13179			return (1);
13180		}
13181		if (newblk->nb_state & DEPCOMPLETE)
13182			continue;
13183		bp = newblk->nb_bmsafemap->sm_buf;
13184		bp = getdirtybuf(bp, LOCK_PTR(ump), waitfor);
13185		if (bp == NULL) {
13186			if (waitfor == MNT_NOWAIT)
13187				continue;
13188			return (1);
13189		}
13190		FREE_LOCK(ump);
13191		if (waitfor == MNT_NOWAIT)
13192			bawrite(bp);
13193		else
13194			*errorp = bwrite(bp);
13195		ACQUIRE_LOCK(ump);
13196		return (1);
13197	}
13198	return (0);
13199}
13200
13201/*
13202 * Flush dependencies associated with an allocdirect block.
13203 */
13204static int
13205flush_newblk_dep(
13206	struct vnode *vp,
13207	struct mount *mp,
13208	ufs_lbn_t lbn)
13209{
13210	struct newblk *newblk;
13211	struct ufsmount *ump;
13212	struct bufobj *bo;
13213	struct inode *ip;
13214	struct buf *bp;
13215	ufs2_daddr_t blkno;
13216	int error;
13217
13218	error = 0;
13219	bo = &vp->v_bufobj;
13220	ip = VTOI(vp);
13221	blkno = DIP(ip, i_db[lbn]);
13222	if (blkno == 0)
13223		panic("flush_newblk_dep: Missing block");
13224	ump = VFSTOUFS(mp);
13225	ACQUIRE_LOCK(ump);
13226	/*
13227	 * Loop until all dependencies related to this block are satisfied.
13228	 * We must be careful to restart after each sleep in case a write
13229	 * completes some part of this process for us.
13230	 */
13231	for (;;) {
13232		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
13233			FREE_LOCK(ump);
13234			break;
13235		}
13236		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
13237			panic("flush_newblk_dep: Bad newblk %p", newblk);
13238		/*
13239		 * Flush the journal.
13240		 */
13241		if (newblk->nb_jnewblk != NULL) {
13242			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
13243			continue;
13244		}
13245		/*
13246		 * Write the bitmap dependency.
13247		 */
13248		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
13249			bp = newblk->nb_bmsafemap->sm_buf;
13250			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
13251			if (bp == NULL)
13252				continue;
13253			FREE_LOCK(ump);
13254			error = bwrite(bp);
13255			if (error)
13256				break;
13257			ACQUIRE_LOCK(ump);
13258			continue;
13259		}
13260		/*
13261		 * Write the buffer.
13262		 */
13263		FREE_LOCK(ump);
13264		BO_LOCK(bo);
13265		bp = gbincore(bo, lbn);
13266		if (bp != NULL) {
13267			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
13268			    LK_INTERLOCK, BO_LOCKPTR(bo));
13269			if (error == ENOLCK) {
13270				ACQUIRE_LOCK(ump);
13271				error = 0;
13272				continue; /* Slept, retry */
13273			}
13274			if (error != 0)
13275				break;	/* Failed */
13276			if (bp->b_flags & B_DELWRI) {
13277				bremfree(bp);
13278				error = bwrite(bp);
13279				if (error)
13280					break;
13281			} else
13282				BUF_UNLOCK(bp);
13283		} else
13284			BO_UNLOCK(bo);
13285		/*
13286		 * We have to wait for the direct pointers to
13287		 * point at the newdirblk before the dependency
13288		 * will go away.
13289		 */
13290		error = ffs_update(vp, 1);
13291		if (error)
13292			break;
13293		ACQUIRE_LOCK(ump);
13294	}
13295	return (error);
13296}
13297
13298/*
13299 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
13300 */
13301static int
13302flush_pagedep_deps(
13303	struct vnode *pvp,
13304	struct mount *mp,
13305	struct diraddhd *diraddhdp,
13306	struct buf *locked_bp)
13307{
13308	struct inodedep *inodedep;
13309	struct inoref *inoref;
13310	struct ufsmount *ump;
13311	struct diradd *dap;
13312	struct vnode *vp;
13313	int error = 0;
13314	struct buf *bp;
13315	ino_t inum;
13316	struct diraddhd unfinished;
13317
13318	LIST_INIT(&unfinished);
13319	ump = VFSTOUFS(mp);
13320	LOCK_OWNED(ump);
13321restart:
13322	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
13323		/*
13324		 * Flush ourselves if this directory entry
13325		 * has a MKDIR_PARENT dependency.
13326		 */
13327		if (dap->da_state & MKDIR_PARENT) {
13328			FREE_LOCK(ump);
13329			if ((error = ffs_update(pvp, 1)) != 0)
13330				break;
13331			ACQUIRE_LOCK(ump);
13332			/*
13333			 * If that cleared dependencies, go on to next.
13334			 */
13335			if (dap != LIST_FIRST(diraddhdp))
13336				continue;
13337			/*
13338			 * All MKDIR_PARENT dependencies and all the
13339			 * NEWBLOCK pagedeps that are contained in direct
13340			 * blocks were resolved by doing above ffs_update.
13341			 * Pagedeps contained in indirect blocks may
13342			 * require a complete sync'ing of the directory.
13343			 * We are in the midst of doing a complete sync,
13344			 * so if they are not resolved in this pass we
13345			 * defer them for now as they will be sync'ed by
13346			 * our caller shortly.
13347			 */
13348			LIST_REMOVE(dap, da_pdlist);
13349			LIST_INSERT_HEAD(&unfinished, dap, da_pdlist);
13350			continue;
13351		}
13352		/*
13353		 * A newly allocated directory must have its "." and
13354		 * ".." entries written out before its name can be
13355		 * committed in its parent.
13356		 */
13357		inum = dap->da_newinum;
13358		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
13359			panic("flush_pagedep_deps: lost inode1");
13360		/*
13361		 * Wait for any pending journal adds to complete so we don't
13362		 * cause rollbacks while syncing.
13363		 */
13364		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
13365			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
13366			    == DEPCOMPLETE) {
13367				jwait(&inoref->if_list, MNT_WAIT);
13368				goto restart;
13369			}
13370		}
13371		if (dap->da_state & MKDIR_BODY) {
13372			FREE_LOCK(ump);
13373			error = get_parent_vp(pvp, mp, inum, locked_bp,
13374			    diraddhdp, &unfinished, &vp);
13375			if (error != 0)
13376				break;
13377			error = flush_newblk_dep(vp, mp, 0);
13378			/*
13379			 * If we still have the dependency we might need to
13380			 * update the vnode to sync the new link count to
13381			 * disk.
13382			 */
13383			if (error == 0 && dap == LIST_FIRST(diraddhdp))
13384				error = ffs_update(vp, 1);
13385			vput(vp);
13386			if (error != 0)
13387				break;
13388			ACQUIRE_LOCK(ump);
13389			/*
13390			 * If that cleared dependencies, go on to next.
13391			 */
13392			if (dap != LIST_FIRST(diraddhdp))
13393				continue;
13394			if (dap->da_state & MKDIR_BODY) {
13395				inodedep_lookup(UFSTOVFS(ump), inum, 0,
13396				    &inodedep);
13397				panic("flush_pagedep_deps: MKDIR_BODY "
13398				    "inodedep %p dap %p vp %p",
13399				    inodedep, dap, vp);
13400			}
13401		}
13402		/*
13403		 * Flush the inode on which the directory entry depends.
13404		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
13405		 * the only remaining dependency is that the updated inode
13406		 * count must get pushed to disk. The inode has already
13407		 * been pushed into its inode buffer (via VOP_UPDATE) at
13408		 * the time of the reference count change. So we need only
13409		 * locate that buffer, ensure that there will be no rollback
13410		 * caused by a bitmap dependency, then write the inode buffer.
13411		 */
13412retry:
13413		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
13414			panic("flush_pagedep_deps: lost inode");
13415		/*
13416		 * If the inode still has bitmap dependencies,
13417		 * push them to disk.
13418		 */
13419		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
13420			bp = inodedep->id_bmsafemap->sm_buf;
13421			bp = getdirtybuf(bp, LOCK_PTR(ump), MNT_WAIT);
13422			if (bp == NULL)
13423				goto retry;
13424			FREE_LOCK(ump);
13425			if ((error = bwrite(bp)) != 0)
13426				break;
13427			ACQUIRE_LOCK(ump);
13428			if (dap != LIST_FIRST(diraddhdp))
13429				continue;
13430		}
13431		/*
13432		 * If the inode is still sitting in a buffer waiting
13433		 * to be written or waiting for the link count to be
13434		 * adjusted update it here to flush it to disk.
13435		 */
13436		if (dap == LIST_FIRST(diraddhdp)) {
13437			FREE_LOCK(ump);
13438			error = get_parent_vp(pvp, mp, inum, locked_bp,
13439			    diraddhdp, &unfinished, &vp);
13440			if (error != 0)
13441				break;
13442			error = ffs_update(vp, 1);
13443			vput(vp);
13444			if (error)
13445				break;
13446			ACQUIRE_LOCK(ump);
13447		}
13448		/*
13449		 * If we have failed to get rid of all the dependencies
13450		 * then something is seriously wrong.
13451		 */
13452		if (dap == LIST_FIRST(diraddhdp)) {
13453			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
13454			panic("flush_pagedep_deps: failed to flush "
13455			    "inodedep %p ino %ju dap %p",
13456			    inodedep, (uintmax_t)inum, dap);
13457		}
13458	}
13459	if (error)
13460		ACQUIRE_LOCK(ump);
13461	while ((dap = LIST_FIRST(&unfinished)) != NULL) {
13462		LIST_REMOVE(dap, da_pdlist);
13463		LIST_INSERT_HEAD(diraddhdp, dap, da_pdlist);
13464	}
13465	return (error);
13466}
13467
13468/*
13469 * A large burst of file addition or deletion activity can drive the
13470 * memory load excessively high. First attempt to slow things down
13471 * using the techniques below. If that fails, this routine requests
13472 * the offending operations to fall back to running synchronously
13473 * until the memory load returns to a reasonable level.
13474 */
13475int
13476softdep_slowdown(struct vnode *vp)
13477{
13478	struct ufsmount *ump;
13479	int jlow;
13480	int max_softdeps_hard;
13481
13482	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
13483	    ("softdep_slowdown called on non-softdep filesystem"));
13484	ump = VFSTOUFS(vp->v_mount);
13485	ACQUIRE_LOCK(ump);
13486	jlow = 0;
13487	/*
13488	 * Check for journal space if needed.
13489	 */
13490	if (DOINGSUJ(vp)) {
13491		if (journal_space(ump, 0) == 0)
13492			jlow = 1;
13493	}
13494	/*
13495	 * If the system is under its limits and our filesystem is
13496	 * not responsible for more than our share of the usage and
13497	 * we are not low on journal space, then no need to slow down.
13498	 */
13499	max_softdeps_hard = max_softdeps * 11 / 10;
13500	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
13501	    dep_current[D_INODEDEP] < max_softdeps_hard &&
13502	    dep_current[D_INDIRDEP] < max_softdeps_hard / 1000 &&
13503	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0 &&
13504	    ump->softdep_curdeps[D_DIRREM] <
13505	    (max_softdeps_hard / 2) / stat_flush_threads &&
13506	    ump->softdep_curdeps[D_INODEDEP] <
13507	    max_softdeps_hard / stat_flush_threads &&
13508	    ump->softdep_curdeps[D_INDIRDEP] <
13509	    (max_softdeps_hard / 1000) / stat_flush_threads &&
13510	    ump->softdep_curdeps[D_FREEBLKS] <
13511	    max_softdeps_hard / stat_flush_threads) {
13512		FREE_LOCK(ump);
13513  		return (0);
13514	}
13515	/*
13516	 * If the journal is low or our filesystem is over its limit
13517	 * then speedup the cleanup.
13518	 */
13519	if (ump->softdep_curdeps[D_INDIRDEP] <
13520	    (max_softdeps_hard / 1000) / stat_flush_threads || jlow)
13521		softdep_speedup(ump);
13522	stat_sync_limit_hit += 1;
13523	FREE_LOCK(ump);
13524	/*
13525	 * We only slow down the rate at which new dependencies are
13526	 * generated if we are not using journaling. With journaling,
13527	 * the cleanup should always be sufficient to keep things
13528	 * under control.
13529	 */
13530	if (DOINGSUJ(vp))
13531		return (0);
13532	return (1);
13533}
13534
13535static int
13536softdep_request_cleanup_filter(struct vnode *vp, void *arg __unused)
13537{
13538	return ((vp->v_iflag & VI_OWEINACT) != 0 && vp->v_usecount == 0 &&
13539	    ((vp->v_vflag & VV_NOSYNC) != 0 || VTOI(vp)->i_effnlink == 0));
13540}
13541
13542static void
13543softdep_request_cleanup_inactivate(struct mount *mp)
13544{
13545	struct vnode *vp, *mvp;
13546	int error;
13547
13548	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, softdep_request_cleanup_filter,
13549	    NULL) {
13550		vholdl(vp);
13551		vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK | LK_RETRY);
13552		VI_LOCK(vp);
13553		if (IS_UFS(vp) && vp->v_usecount == 0) {
13554			while ((vp->v_iflag & VI_OWEINACT) != 0) {
13555				error = vinactive(vp);
13556				if (error != 0 && error != ERELOOKUP)
13557					break;
13558			}
13559			atomic_add_int(&stat_delayed_inact, 1);
13560		}
13561		VOP_UNLOCK(vp);
13562		vdropl(vp);
13563	}
13564}
13565
13566/*
13567 * Called by the allocation routines when they are about to fail
13568 * in the hope that we can free up the requested resource (inodes
13569 * or disk space).
13570 *
13571 * First check to see if the work list has anything on it. If it has,
13572 * clean up entries until we successfully free the requested resource.
13573 * Because this process holds inodes locked, we cannot handle any remove
13574 * requests that might block on a locked inode as that could lead to
13575 * deadlock. If the worklist yields none of the requested resource,
13576 * start syncing out vnodes to free up the needed space.
13577 */
13578int
13579softdep_request_cleanup(
13580	struct fs *fs,
13581	struct vnode *vp,
13582	struct ucred *cred,
13583	int resource)
13584{
13585	struct ufsmount *ump;
13586	struct mount *mp;
13587	long starttime;
13588	ufs2_daddr_t needed;
13589	int error, failed_vnode;
13590
13591	/*
13592	 * If we are being called because of a process doing a
13593	 * copy-on-write, then it is not safe to process any
13594	 * worklist items as we will recurse into the copyonwrite
13595	 * routine.  This will result in an incoherent snapshot.
13596	 * If the vnode that we hold is a snapshot, we must avoid
13597	 * handling other resources that could cause deadlock.
13598	 */
13599	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
13600		return (0);
13601
13602	if (resource == FLUSH_BLOCKS_WAIT)
13603		stat_cleanup_blkrequests += 1;
13604	else
13605		stat_cleanup_inorequests += 1;
13606
13607	mp = vp->v_mount;
13608	ump = VFSTOUFS(mp);
13609	mtx_assert(UFS_MTX(ump), MA_OWNED);
13610	UFS_UNLOCK(ump);
13611	error = ffs_update(vp, 1);
13612	if (error != 0 || MOUNTEDSOFTDEP(mp) == 0) {
13613		UFS_LOCK(ump);
13614		return (0);
13615	}
13616	/*
13617	 * If we are in need of resources, start by cleaning up
13618	 * any block removals associated with our inode.
13619	 */
13620	ACQUIRE_LOCK(ump);
13621	process_removes(vp);
13622	process_truncates(vp);
13623	FREE_LOCK(ump);
13624	/*
13625	 * Now clean up at least as many resources as we will need.
13626	 *
13627	 * When requested to clean up inodes, the number that are needed
13628	 * is set by the number of simultaneous writers (mnt_writeopcount)
13629	 * plus a bit of slop (2) in case some more writers show up while
13630	 * we are cleaning.
13631	 *
13632	 * When requested to free up space, the amount of space that
13633	 * we need is enough blocks to allocate a full-sized segment
13634	 * (fs_contigsumsize). The number of such segments that will
13635	 * be needed is set by the number of simultaneous writers
13636	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
13637	 * writers show up while we are cleaning.
13638	 *
13639	 * Additionally, if we are unpriviledged and allocating space,
13640	 * we need to ensure that we clean up enough blocks to get the
13641	 * needed number of blocks over the threshold of the minimum
13642	 * number of blocks required to be kept free by the filesystem
13643	 * (fs_minfree).
13644	 */
13645	if (resource == FLUSH_INODES_WAIT) {
13646		needed = vfs_mount_fetch_counter(vp->v_mount,
13647		    MNT_COUNT_WRITEOPCOUNT) + 2;
13648	} else if (resource == FLUSH_BLOCKS_WAIT) {
13649		needed = (vfs_mount_fetch_counter(vp->v_mount,
13650		    MNT_COUNT_WRITEOPCOUNT) + 2) * fs->fs_contigsumsize;
13651		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE))
13652			needed += fragstoblks(fs,
13653			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
13654			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
13655	} else {
13656		printf("softdep_request_cleanup: Unknown resource type %d\n",
13657		    resource);
13658		UFS_LOCK(ump);
13659		return (0);
13660	}
13661	starttime = time_second;
13662retry:
13663	if (resource == FLUSH_BLOCKS_WAIT &&
13664	    fs->fs_cstotal.cs_nbfree <= needed)
13665		softdep_send_speedup(ump, needed * fs->fs_bsize,
13666		    BIO_SPEEDUP_TRIM);
13667	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
13668	    fs->fs_cstotal.cs_nbfree <= needed) ||
13669	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13670	    fs->fs_cstotal.cs_nifree <= needed)) {
13671		ACQUIRE_LOCK(ump);
13672		if (ump->softdep_on_worklist > 0 &&
13673		    process_worklist_item(UFSTOVFS(ump),
13674		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
13675			stat_worklist_push += 1;
13676		FREE_LOCK(ump);
13677	}
13678
13679	/*
13680	 * Check that there are vnodes pending inactivation.  As they
13681	 * have been unlinked, inactivating them will free up their
13682	 * inodes.
13683	 */
13684	ACQUIRE_LOCK(ump);
13685	if (resource == FLUSH_INODES_WAIT &&
13686	    fs->fs_cstotal.cs_nifree <= needed &&
13687	    fs->fs_pendinginodes <= needed) {
13688		if ((ump->um_softdep->sd_flags & FLUSH_DI_ACTIVE) == 0) {
13689			ump->um_softdep->sd_flags |= FLUSH_DI_ACTIVE;
13690			FREE_LOCK(ump);
13691			softdep_request_cleanup_inactivate(mp);
13692			ACQUIRE_LOCK(ump);
13693			ump->um_softdep->sd_flags &= ~FLUSH_DI_ACTIVE;
13694			wakeup(&ump->um_softdep->sd_flags);
13695		} else {
13696			while ((ump->um_softdep->sd_flags &
13697			    FLUSH_DI_ACTIVE) != 0) {
13698				msleep(&ump->um_softdep->sd_flags,
13699				    LOCK_PTR(ump), PVM, "ffsvina", hz);
13700			}
13701		}
13702	}
13703	FREE_LOCK(ump);
13704
13705	/*
13706	 * If we still need resources and there are no more worklist
13707	 * entries to process to obtain them, we have to start flushing
13708	 * the dirty vnodes to force the release of additional requests
13709	 * to the worklist that we can then process to reap addition
13710	 * resources. We walk the vnodes associated with the mount point
13711	 * until we get the needed worklist requests that we can reap.
13712	 *
13713	 * If there are several threads all needing to clean the same
13714	 * mount point, only one is allowed to walk the mount list.
13715	 * When several threads all try to walk the same mount list,
13716	 * they end up competing with each other and often end up in
13717	 * livelock. This approach ensures that forward progress is
13718	 * made at the cost of occational ENOSPC errors being returned
13719	 * that might otherwise have been avoided.
13720	 */
13721	error = 1;
13722	if ((resource == FLUSH_BLOCKS_WAIT &&
13723	     fs->fs_cstotal.cs_nbfree <= needed) ||
13724	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
13725	     fs->fs_cstotal.cs_nifree <= needed)) {
13726		ACQUIRE_LOCK(ump);
13727		if ((ump->um_softdep->sd_flags & FLUSH_RC_ACTIVE) == 0) {
13728			ump->um_softdep->sd_flags |= FLUSH_RC_ACTIVE;
13729			FREE_LOCK(ump);
13730			failed_vnode = softdep_request_cleanup_flush(mp, ump);
13731			ACQUIRE_LOCK(ump);
13732			ump->um_softdep->sd_flags &= ~FLUSH_RC_ACTIVE;
13733			wakeup(&ump->um_softdep->sd_flags);
13734			FREE_LOCK(ump);
13735			if (ump->softdep_on_worklist > 0) {
13736				stat_cleanup_retries += 1;
13737				if (!failed_vnode)
13738					goto retry;
13739			}
13740		} else {
13741			while ((ump->um_softdep->sd_flags &
13742			    FLUSH_RC_ACTIVE) != 0) {
13743				msleep(&ump->um_softdep->sd_flags,
13744				    LOCK_PTR(ump), PVM, "ffsrca", hz);
13745			}
13746			FREE_LOCK(ump);
13747			error = 0;
13748		}
13749		stat_cleanup_failures += 1;
13750	}
13751	if (time_second - starttime > stat_cleanup_high_delay)
13752		stat_cleanup_high_delay = time_second - starttime;
13753	UFS_LOCK(ump);
13754	return (error);
13755}
13756
13757/*
13758 * Scan the vnodes for the specified mount point flushing out any
13759 * vnodes that can be locked without waiting. Finally, try to flush
13760 * the device associated with the mount point if it can be locked
13761 * without waiting.
13762 *
13763 * We return 0 if we were able to lock every vnode in our scan.
13764 * If we had to skip one or more vnodes, we return 1.
13765 */
13766static int
13767softdep_request_cleanup_flush(struct mount *mp, struct ufsmount *ump)
13768{
13769	struct thread *td;
13770	struct vnode *lvp, *mvp;
13771	int failed_vnode;
13772
13773	failed_vnode = 0;
13774	td = curthread;
13775	MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
13776		if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
13777			VI_UNLOCK(lvp);
13778			continue;
13779		}
13780		if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT) != 0) {
13781			failed_vnode = 1;
13782			continue;
13783		}
13784		if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
13785			vput(lvp);
13786			continue;
13787		}
13788		(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
13789		vput(lvp);
13790	}
13791	lvp = ump->um_devvp;
13792	if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
13793		VOP_FSYNC(lvp, MNT_NOWAIT, td);
13794		VOP_UNLOCK(lvp);
13795	}
13796	return (failed_vnode);
13797}
13798
13799static bool
13800softdep_excess_items(struct ufsmount *ump, int item)
13801{
13802
13803	KASSERT(item >= 0 && item < D_LAST, ("item %d", item));
13804	return (dep_current[item] > max_softdeps &&
13805	    ump->softdep_curdeps[item] > max_softdeps /
13806	    stat_flush_threads);
13807}
13808
13809static void
13810schedule_cleanup(struct mount *mp)
13811{
13812	struct ufsmount *ump;
13813	struct thread *td;
13814
13815	ump = VFSTOUFS(mp);
13816	LOCK_OWNED(ump);
13817	FREE_LOCK(ump);
13818	td = curthread;
13819	if ((td->td_pflags & TDP_KTHREAD) != 0 &&
13820	    (td->td_proc->p_flag2 & P2_AST_SU) == 0) {
13821		/*
13822		 * No ast is delivered to kernel threads, so nobody
13823		 * would deref the mp.  Some kernel threads
13824		 * explicitly check for AST, e.g. NFS daemon does
13825		 * this in the serving loop.
13826		 */
13827		return;
13828	}
13829	if (td->td_su != NULL)
13830		vfs_rel(td->td_su);
13831	vfs_ref(mp);
13832	td->td_su = mp;
13833	ast_sched(td, TDA_UFS);
13834}
13835
13836static void
13837softdep_ast_cleanup_proc(struct thread *td, int ast __unused)
13838{
13839	struct mount *mp;
13840	struct ufsmount *ump;
13841	int error;
13842	bool req;
13843
13844	while ((mp = td->td_su) != NULL) {
13845		td->td_su = NULL;
13846		error = vfs_busy(mp, MBF_NOWAIT);
13847		vfs_rel(mp);
13848		if (error != 0)
13849			return;
13850		if (ffs_own_mount(mp) && MOUNTEDSOFTDEP(mp)) {
13851			ump = VFSTOUFS(mp);
13852			for (;;) {
13853				req = false;
13854				ACQUIRE_LOCK(ump);
13855				if (softdep_excess_items(ump, D_INODEDEP)) {
13856					req = true;
13857					request_cleanup(mp, FLUSH_INODES);
13858				}
13859				if (softdep_excess_items(ump, D_DIRREM)) {
13860					req = true;
13861					request_cleanup(mp, FLUSH_BLOCKS);
13862				}
13863				FREE_LOCK(ump);
13864				if (softdep_excess_items(ump, D_NEWBLK) ||
13865				    softdep_excess_items(ump, D_ALLOCDIRECT) ||
13866				    softdep_excess_items(ump, D_ALLOCINDIR)) {
13867					error = vn_start_write(NULL, &mp,
13868					    V_WAIT);
13869					if (error == 0) {
13870						req = true;
13871						VFS_SYNC(mp, MNT_WAIT);
13872						vn_finished_write(mp);
13873					}
13874				}
13875				if ((td->td_pflags & TDP_KTHREAD) != 0 || !req)
13876					break;
13877			}
13878		}
13879		vfs_unbusy(mp);
13880	}
13881	if ((mp = td->td_su) != NULL) {
13882		td->td_su = NULL;
13883		vfs_rel(mp);
13884	}
13885}
13886
13887/*
13888 * If memory utilization has gotten too high, deliberately slow things
13889 * down and speed up the I/O processing.
13890 */
13891static int
13892request_cleanup(struct mount *mp, int resource)
13893{
13894	struct thread *td = curthread;
13895	struct ufsmount *ump;
13896
13897	ump = VFSTOUFS(mp);
13898	LOCK_OWNED(ump);
13899	/*
13900	 * We never hold up the filesystem syncer or buf daemon.
13901	 */
13902	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
13903		return (0);
13904	/*
13905	 * First check to see if the work list has gotten backlogged.
13906	 * If it has, co-opt this process to help clean up two entries.
13907	 * Because this process may hold inodes locked, we cannot
13908	 * handle any remove requests that might block on a locked
13909	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
13910	 * to avoid recursively processing the worklist.
13911	 */
13912	if (ump->softdep_on_worklist > max_softdeps / 10) {
13913		td->td_pflags |= TDP_SOFTDEP;
13914		process_worklist_item(mp, 2, LK_NOWAIT);
13915		td->td_pflags &= ~TDP_SOFTDEP;
13916		stat_worklist_push += 2;
13917		return(1);
13918	}
13919	/*
13920	 * Next, we attempt to speed up the syncer process. If that
13921	 * is successful, then we allow the process to continue.
13922	 */
13923	if (softdep_speedup(ump) &&
13924	    resource != FLUSH_BLOCKS_WAIT &&
13925	    resource != FLUSH_INODES_WAIT)
13926		return(0);
13927	/*
13928	 * If we are resource constrained on inode dependencies, try
13929	 * flushing some dirty inodes. Otherwise, we are constrained
13930	 * by file deletions, so try accelerating flushes of directories
13931	 * with removal dependencies. We would like to do the cleanup
13932	 * here, but we probably hold an inode locked at this point and
13933	 * that might deadlock against one that we try to clean. So,
13934	 * the best that we can do is request the syncer daemon to do
13935	 * the cleanup for us.
13936	 */
13937	switch (resource) {
13938	case FLUSH_INODES:
13939	case FLUSH_INODES_WAIT:
13940		ACQUIRE_GBLLOCK(&lk);
13941		stat_ino_limit_push += 1;
13942		req_clear_inodedeps += 1;
13943		FREE_GBLLOCK(&lk);
13944		stat_countp = &stat_ino_limit_hit;
13945		break;
13946
13947	case FLUSH_BLOCKS:
13948	case FLUSH_BLOCKS_WAIT:
13949		ACQUIRE_GBLLOCK(&lk);
13950		stat_blk_limit_push += 1;
13951		req_clear_remove += 1;
13952		FREE_GBLLOCK(&lk);
13953		stat_countp = &stat_blk_limit_hit;
13954		break;
13955
13956	default:
13957		panic("request_cleanup: unknown type");
13958	}
13959	/*
13960	 * Hopefully the syncer daemon will catch up and awaken us.
13961	 * We wait at most tickdelay before proceeding in any case.
13962	 */
13963	ACQUIRE_GBLLOCK(&lk);
13964	FREE_LOCK(ump);
13965	proc_waiting += 1;
13966	if (callout_pending(&softdep_callout) == FALSE)
13967		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13968		    pause_timer, 0);
13969
13970	if ((td->td_pflags & TDP_KTHREAD) == 0)
13971		msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13972	proc_waiting -= 1;
13973	FREE_GBLLOCK(&lk);
13974	ACQUIRE_LOCK(ump);
13975	return (1);
13976}
13977
13978/*
13979 * Awaken processes pausing in request_cleanup and clear proc_waiting
13980 * to indicate that there is no longer a timer running. Pause_timer
13981 * will be called with the global softdep mutex (&lk) locked.
13982 */
13983static void
13984pause_timer(void *arg)
13985{
13986
13987	GBLLOCK_OWNED(&lk);
13988	/*
13989	 * The callout_ API has acquired mtx and will hold it around this
13990	 * function call.
13991	 */
13992	*stat_countp += proc_waiting;
13993	wakeup(&proc_waiting);
13994}
13995
13996/*
13997 * If requested, try removing inode or removal dependencies.
13998 */
13999static void
14000check_clear_deps(struct mount *mp)
14001{
14002	struct ufsmount *ump;
14003	bool suj_susp;
14004
14005	/*
14006	 * Tell the lower layers that any TRIM or WRITE transactions that have
14007	 * been delayed for performance reasons should proceed to help alleviate
14008	 * the shortage faster. The race between checking req_* and the softdep
14009	 * mutex (lk) is fine since this is an advisory operation that at most
14010	 * causes deferred work to be done sooner.
14011	 */
14012	ump = VFSTOUFS(mp);
14013	suj_susp = ump->um_softdep->sd_jblocks != NULL &&
14014	    ump->softdep_jblocks->jb_suspended;
14015	if (req_clear_remove || req_clear_inodedeps || suj_susp) {
14016		FREE_LOCK(ump);
14017		softdep_send_speedup(ump, 0, BIO_SPEEDUP_TRIM | BIO_SPEEDUP_WRITE);
14018		ACQUIRE_LOCK(ump);
14019	}
14020
14021	/*
14022	 * If we are suspended, it may be because of our using
14023	 * too many inodedeps, so help clear them out.
14024	 */
14025	if (suj_susp)
14026		clear_inodedeps(mp);
14027
14028	/*
14029	 * General requests for cleanup of backed up dependencies
14030	 */
14031	ACQUIRE_GBLLOCK(&lk);
14032	if (req_clear_inodedeps) {
14033		req_clear_inodedeps -= 1;
14034		FREE_GBLLOCK(&lk);
14035		clear_inodedeps(mp);
14036		ACQUIRE_GBLLOCK(&lk);
14037		wakeup(&proc_waiting);
14038	}
14039	if (req_clear_remove) {
14040		req_clear_remove -= 1;
14041		FREE_GBLLOCK(&lk);
14042		clear_remove(mp);
14043		ACQUIRE_GBLLOCK(&lk);
14044		wakeup(&proc_waiting);
14045	}
14046	FREE_GBLLOCK(&lk);
14047}
14048
14049/*
14050 * Flush out a directory with at least one removal dependency in an effort to
14051 * reduce the number of dirrem, freefile, and freeblks dependency structures.
14052 */
14053static void
14054clear_remove(struct mount *mp)
14055{
14056	struct pagedep_hashhead *pagedephd;
14057	struct pagedep *pagedep;
14058	struct ufsmount *ump;
14059	struct vnode *vp;
14060	struct bufobj *bo;
14061	int error, cnt;
14062	ino_t ino;
14063
14064	ump = VFSTOUFS(mp);
14065	LOCK_OWNED(ump);
14066
14067	for (cnt = 0; cnt <= ump->pagedep_hash_size; cnt++) {
14068		pagedephd = &ump->pagedep_hashtbl[ump->pagedep_nextclean++];
14069		if (ump->pagedep_nextclean > ump->pagedep_hash_size)
14070			ump->pagedep_nextclean = 0;
14071		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
14072			if (LIST_EMPTY(&pagedep->pd_dirremhd))
14073				continue;
14074			ino = pagedep->pd_ino;
14075			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
14076				continue;
14077			FREE_LOCK(ump);
14078
14079			/*
14080			 * Let unmount clear deps
14081			 */
14082			error = vfs_busy(mp, MBF_NOWAIT);
14083			if (error != 0)
14084				goto finish_write;
14085			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
14086			     FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP);
14087			vfs_unbusy(mp);
14088			if (error != 0) {
14089				softdep_error("clear_remove: vget", error);
14090				goto finish_write;
14091			}
14092			MPASS(VTOI(vp)->i_mode != 0);
14093			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
14094				softdep_error("clear_remove: fsync", error);
14095			bo = &vp->v_bufobj;
14096			BO_LOCK(bo);
14097			drain_output(vp);
14098			BO_UNLOCK(bo);
14099			vput(vp);
14100		finish_write:
14101			vn_finished_write(mp);
14102			ACQUIRE_LOCK(ump);
14103			return;
14104		}
14105	}
14106}
14107
14108/*
14109 * Clear out a block of dirty inodes in an effort to reduce
14110 * the number of inodedep dependency structures.
14111 */
14112static void
14113clear_inodedeps(struct mount *mp)
14114{
14115	struct inodedep_hashhead *inodedephd;
14116	struct inodedep *inodedep;
14117	struct ufsmount *ump;
14118	struct vnode *vp;
14119	struct fs *fs;
14120	int error, cnt;
14121	ino_t firstino, lastino, ino;
14122
14123	ump = VFSTOUFS(mp);
14124	fs = ump->um_fs;
14125	LOCK_OWNED(ump);
14126	/*
14127	 * Pick a random inode dependency to be cleared.
14128	 * We will then gather up all the inodes in its block
14129	 * that have dependencies and flush them out.
14130	 */
14131	for (cnt = 0; cnt <= ump->inodedep_hash_size; cnt++) {
14132		inodedephd = &ump->inodedep_hashtbl[ump->inodedep_nextclean++];
14133		if (ump->inodedep_nextclean > ump->inodedep_hash_size)
14134			ump->inodedep_nextclean = 0;
14135		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
14136			break;
14137	}
14138	if (inodedep == NULL)
14139		return;
14140	/*
14141	 * Find the last inode in the block with dependencies.
14142	 */
14143	firstino = rounddown2(inodedep->id_ino, INOPB(fs));
14144	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
14145		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
14146			break;
14147	/*
14148	 * Asynchronously push all but the last inode with dependencies.
14149	 * Synchronously push the last inode with dependencies to ensure
14150	 * that the inode block gets written to free up the inodedeps.
14151	 */
14152	for (ino = firstino; ino <= lastino; ino++) {
14153		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
14154			continue;
14155		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
14156			continue;
14157		FREE_LOCK(ump);
14158		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
14159		if (error != 0) {
14160			vn_finished_write(mp);
14161			ACQUIRE_LOCK(ump);
14162			return;
14163		}
14164		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
14165		    FFSV_FORCEINSMQ | FFSV_FORCEINODEDEP)) != 0) {
14166			softdep_error("clear_inodedeps: vget", error);
14167			vfs_unbusy(mp);
14168			vn_finished_write(mp);
14169			ACQUIRE_LOCK(ump);
14170			return;
14171		}
14172		vfs_unbusy(mp);
14173		if (VTOI(vp)->i_mode == 0) {
14174			vgone(vp);
14175		} else if (ino == lastino) {
14176			do {
14177				error = ffs_syncvnode(vp, MNT_WAIT, 0);
14178			} while (error == ERELOOKUP);
14179			if (error != 0)
14180				softdep_error("clear_inodedeps: fsync1", error);
14181		} else {
14182			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
14183				softdep_error("clear_inodedeps: fsync2", error);
14184			BO_LOCK(&vp->v_bufobj);
14185			drain_output(vp);
14186			BO_UNLOCK(&vp->v_bufobj);
14187		}
14188		vput(vp);
14189		vn_finished_write(mp);
14190		ACQUIRE_LOCK(ump);
14191	}
14192}
14193
14194void
14195softdep_buf_append(struct buf *bp, struct workhead *wkhd)
14196{
14197	struct worklist *wk;
14198	struct ufsmount *ump;
14199
14200	if ((wk = LIST_FIRST(wkhd)) == NULL)
14201		return;
14202	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
14203	    ("softdep_buf_append called on non-softdep filesystem"));
14204	ump = VFSTOUFS(wk->wk_mp);
14205	ACQUIRE_LOCK(ump);
14206	while ((wk = LIST_FIRST(wkhd)) != NULL) {
14207		WORKLIST_REMOVE(wk);
14208		WORKLIST_INSERT(&bp->b_dep, wk);
14209	}
14210	FREE_LOCK(ump);
14211
14212}
14213
14214void
14215softdep_inode_append(
14216	struct inode *ip,
14217	struct ucred *cred,
14218	struct workhead *wkhd)
14219{
14220	struct buf *bp;
14221	struct fs *fs;
14222	struct ufsmount *ump;
14223	int error;
14224
14225	ump = ITOUMP(ip);
14226	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
14227	    ("softdep_inode_append called on non-softdep filesystem"));
14228	fs = ump->um_fs;
14229	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
14230	    (int)fs->fs_bsize, cred, &bp);
14231	if (error) {
14232		bqrelse(bp);
14233		softdep_freework(wkhd);
14234		return;
14235	}
14236	softdep_buf_append(bp, wkhd);
14237	bqrelse(bp);
14238}
14239
14240void
14241softdep_freework(struct workhead *wkhd)
14242{
14243	struct worklist *wk;
14244	struct ufsmount *ump;
14245
14246	if ((wk = LIST_FIRST(wkhd)) == NULL)
14247		return;
14248	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
14249	    ("softdep_freework called on non-softdep filesystem"));
14250	ump = VFSTOUFS(wk->wk_mp);
14251	ACQUIRE_LOCK(ump);
14252	handle_jwork(wkhd);
14253	FREE_LOCK(ump);
14254}
14255
14256static struct ufsmount *
14257softdep_bp_to_mp(struct buf *bp)
14258{
14259	struct mount *mp;
14260	struct vnode *vp;
14261
14262	if (LIST_EMPTY(&bp->b_dep))
14263		return (NULL);
14264	vp = bp->b_vp;
14265	KASSERT(vp != NULL,
14266	    ("%s, buffer with dependencies lacks vnode", __func__));
14267
14268	/*
14269	 * The ump mount point is stable after we get a correct
14270	 * pointer, since bp is locked and this prevents unmount from
14271	 * proceeding.  But to get to it, we cannot dereference bp->b_dep
14272	 * head wk_mp, because we do not yet own SU ump lock and
14273	 * workitem might be freed while dereferenced.
14274	 */
14275retry:
14276	switch (vp->v_type) {
14277	case VCHR:
14278		VI_LOCK(vp);
14279		mp = vp->v_type == VCHR ? vp->v_rdev->si_mountpt : NULL;
14280		VI_UNLOCK(vp);
14281		if (mp == NULL)
14282			goto retry;
14283		break;
14284	case VREG:
14285	case VDIR:
14286	case VLNK:
14287	case VFIFO:
14288	case VSOCK:
14289		mp = vp->v_mount;
14290		break;
14291	case VBLK:
14292		vn_printf(vp, "softdep_bp_to_mp: unexpected block device\n");
14293		/* FALLTHROUGH */
14294	case VNON:
14295	case VBAD:
14296	case VMARKER:
14297		mp = NULL;
14298		break;
14299	default:
14300		vn_printf(vp, "unknown vnode type");
14301		mp = NULL;
14302		break;
14303	}
14304	return (VFSTOUFS(mp));
14305}
14306
14307/*
14308 * Function to determine if the buffer has outstanding dependencies
14309 * that will cause a roll-back if the buffer is written. If wantcount
14310 * is set, return number of dependencies, otherwise just yes or no.
14311 */
14312static int
14313softdep_count_dependencies(struct buf *bp, int wantcount)
14314{
14315	struct worklist *wk;
14316	struct ufsmount *ump;
14317	struct bmsafemap *bmsafemap;
14318	struct freework *freework;
14319	struct inodedep *inodedep;
14320	struct indirdep *indirdep;
14321	struct freeblks *freeblks;
14322	struct allocindir *aip;
14323	struct pagedep *pagedep;
14324	struct dirrem *dirrem;
14325	struct newblk *newblk;
14326	struct mkdir *mkdir;
14327	struct diradd *dap;
14328	int i, retval;
14329
14330	ump = softdep_bp_to_mp(bp);
14331	if (ump == NULL)
14332		return (0);
14333	retval = 0;
14334	ACQUIRE_LOCK(ump);
14335	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
14336		switch (wk->wk_type) {
14337		case D_INODEDEP:
14338			inodedep = WK_INODEDEP(wk);
14339			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
14340				/* bitmap allocation dependency */
14341				retval += 1;
14342				if (!wantcount)
14343					goto out;
14344			}
14345			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
14346				/* direct block pointer dependency */
14347				retval += 1;
14348				if (!wantcount)
14349					goto out;
14350			}
14351			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
14352				/* direct block pointer dependency */
14353				retval += 1;
14354				if (!wantcount)
14355					goto out;
14356			}
14357			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
14358				/* Add reference dependency. */
14359				retval += 1;
14360				if (!wantcount)
14361					goto out;
14362			}
14363			continue;
14364
14365		case D_INDIRDEP:
14366			indirdep = WK_INDIRDEP(wk);
14367
14368			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
14369				/* indirect truncation dependency */
14370				retval += 1;
14371				if (!wantcount)
14372					goto out;
14373			}
14374
14375			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
14376				/* indirect block pointer dependency */
14377				retval += 1;
14378				if (!wantcount)
14379					goto out;
14380			}
14381			continue;
14382
14383		case D_PAGEDEP:
14384			pagedep = WK_PAGEDEP(wk);
14385			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
14386				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
14387					/* Journal remove ref dependency. */
14388					retval += 1;
14389					if (!wantcount)
14390						goto out;
14391				}
14392			}
14393			for (i = 0; i < DAHASHSZ; i++) {
14394				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
14395					/* directory entry dependency */
14396					retval += 1;
14397					if (!wantcount)
14398						goto out;
14399				}
14400			}
14401			continue;
14402
14403		case D_BMSAFEMAP:
14404			bmsafemap = WK_BMSAFEMAP(wk);
14405			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
14406				/* Add reference dependency. */
14407				retval += 1;
14408				if (!wantcount)
14409					goto out;
14410			}
14411			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
14412				/* Allocate block dependency. */
14413				retval += 1;
14414				if (!wantcount)
14415					goto out;
14416			}
14417			continue;
14418
14419		case D_FREEBLKS:
14420			freeblks = WK_FREEBLKS(wk);
14421			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
14422				/* Freeblk journal dependency. */
14423				retval += 1;
14424				if (!wantcount)
14425					goto out;
14426			}
14427			continue;
14428
14429		case D_ALLOCDIRECT:
14430		case D_ALLOCINDIR:
14431			newblk = WK_NEWBLK(wk);
14432			if (newblk->nb_jnewblk) {
14433				/* Journal allocate dependency. */
14434				retval += 1;
14435				if (!wantcount)
14436					goto out;
14437			}
14438			continue;
14439
14440		case D_MKDIR:
14441			mkdir = WK_MKDIR(wk);
14442			if (mkdir->md_jaddref) {
14443				/* Journal reference dependency. */
14444				retval += 1;
14445				if (!wantcount)
14446					goto out;
14447			}
14448			continue;
14449
14450		case D_FREEWORK:
14451		case D_FREEDEP:
14452		case D_JSEGDEP:
14453		case D_JSEG:
14454		case D_SBDEP:
14455			/* never a dependency on these blocks */
14456			continue;
14457
14458		default:
14459			panic("softdep_count_dependencies: Unexpected type %s",
14460			    TYPENAME(wk->wk_type));
14461			/* NOTREACHED */
14462		}
14463	}
14464out:
14465	FREE_LOCK(ump);
14466	return (retval);
14467}
14468
14469/*
14470 * Acquire exclusive access to a buffer.
14471 * Must be called with a locked mtx parameter.
14472 * Return acquired buffer or NULL on failure.
14473 */
14474static struct buf *
14475getdirtybuf(struct buf *bp,
14476	struct rwlock *lock,
14477	int waitfor)
14478{
14479	int error;
14480
14481	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
14482		if (waitfor != MNT_WAIT)
14483			return (NULL);
14484		error = BUF_LOCK(bp,
14485		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
14486		/*
14487		 * Even if we successfully acquire bp here, we have dropped
14488		 * lock, which may violates our guarantee.
14489		 */
14490		if (error == 0)
14491			BUF_UNLOCK(bp);
14492		else if (error != ENOLCK)
14493			panic("getdirtybuf: inconsistent lock: %d", error);
14494		rw_wlock(lock);
14495		return (NULL);
14496	}
14497	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
14498		if (lock != BO_LOCKPTR(bp->b_bufobj) && waitfor == MNT_WAIT) {
14499			rw_wunlock(lock);
14500			BO_LOCK(bp->b_bufobj);
14501			BUF_UNLOCK(bp);
14502			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
14503				bp->b_vflags |= BV_BKGRDWAIT;
14504				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
14505				       PRIBIO | PDROP, "getbuf", 0);
14506			} else
14507				BO_UNLOCK(bp->b_bufobj);
14508			rw_wlock(lock);
14509			return (NULL);
14510		}
14511		BUF_UNLOCK(bp);
14512		if (waitfor != MNT_WAIT)
14513			return (NULL);
14514#ifdef DEBUG_VFS_LOCKS
14515		if (bp->b_vp->v_type != VCHR)
14516			ASSERT_BO_WLOCKED(bp->b_bufobj);
14517#endif
14518		bp->b_vflags |= BV_BKGRDWAIT;
14519		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
14520		return (NULL);
14521	}
14522	if ((bp->b_flags & B_DELWRI) == 0) {
14523		BUF_UNLOCK(bp);
14524		return (NULL);
14525	}
14526	bremfree(bp);
14527	return (bp);
14528}
14529
14530/*
14531 * Check if it is safe to suspend the file system now.  On entry,
14532 * the vnode interlock for devvp should be held.  Return 0 with
14533 * the mount interlock held if the file system can be suspended now,
14534 * otherwise return EAGAIN with the mount interlock held.
14535 */
14536int
14537softdep_check_suspend(struct mount *mp,
14538		      struct vnode *devvp,
14539		      int softdep_depcnt,
14540		      int softdep_accdepcnt,
14541		      int secondary_writes,
14542		      int secondary_accwrites)
14543{
14544	struct buf *bp;
14545	struct bufobj *bo;
14546	struct ufsmount *ump;
14547	struct inodedep *inodedep;
14548	struct indirdep *indirdep;
14549	struct worklist *wk, *nextwk;
14550	int error, unlinked;
14551
14552	bo = &devvp->v_bufobj;
14553	ASSERT_BO_WLOCKED(bo);
14554
14555	/*
14556	 * If we are not running with soft updates, then we need only
14557	 * deal with secondary writes as we try to suspend.
14558	 */
14559	if (MOUNTEDSOFTDEP(mp) == 0) {
14560		MNT_ILOCK(mp);
14561		while (mp->mnt_secondary_writes != 0) {
14562			BO_UNLOCK(bo);
14563			msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
14564			    (PUSER - 1) | PDROP, "secwr", 0);
14565			BO_LOCK(bo);
14566			MNT_ILOCK(mp);
14567		}
14568
14569		/*
14570		 * Reasons for needing more work before suspend:
14571		 * - Dirty buffers on devvp.
14572		 * - Secondary writes occurred after start of vnode sync loop
14573		 */
14574		error = 0;
14575		if (bo->bo_numoutput > 0 ||
14576		    bo->bo_dirty.bv_cnt > 0 ||
14577		    secondary_writes != 0 ||
14578		    mp->mnt_secondary_writes != 0 ||
14579		    secondary_accwrites != mp->mnt_secondary_accwrites)
14580			error = EAGAIN;
14581		BO_UNLOCK(bo);
14582		return (error);
14583	}
14584
14585	/*
14586	 * If we are running with soft updates, then we need to coordinate
14587	 * with them as we try to suspend.
14588	 */
14589	ump = VFSTOUFS(mp);
14590	for (;;) {
14591		if (!TRY_ACQUIRE_LOCK(ump)) {
14592			BO_UNLOCK(bo);
14593			ACQUIRE_LOCK(ump);
14594			FREE_LOCK(ump);
14595			BO_LOCK(bo);
14596			continue;
14597		}
14598		MNT_ILOCK(mp);
14599		if (mp->mnt_secondary_writes != 0) {
14600			FREE_LOCK(ump);
14601			BO_UNLOCK(bo);
14602			msleep(&mp->mnt_secondary_writes,
14603			       MNT_MTX(mp),
14604			       (PUSER - 1) | PDROP, "secwr", 0);
14605			BO_LOCK(bo);
14606			continue;
14607		}
14608		break;
14609	}
14610
14611	unlinked = 0;
14612	if (MOUNTEDSUJ(mp)) {
14613		for (inodedep = TAILQ_FIRST(&ump->softdep_unlinked);
14614		    inodedep != NULL;
14615		    inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
14616			if ((inodedep->id_state & (UNLINKED | UNLINKLINKS |
14617			    UNLINKONLIST)) != (UNLINKED | UNLINKLINKS |
14618			    UNLINKONLIST) ||
14619			    !check_inodedep_free(inodedep))
14620				continue;
14621			unlinked++;
14622		}
14623	}
14624
14625	/*
14626	 * XXX Check for orphaned indirdep dependency structures.
14627	 *
14628	 * During forcible unmount after a disk failure there is a
14629	 * bug that causes one or more indirdep dependency structures
14630	 * to fail to be deallocated. We check for them here and clean
14631	 * them up so that the unmount can succeed.
14632	 */
14633	if ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0 && ump->softdep_deps > 0 &&
14634	    ump->softdep_deps == ump->softdep_curdeps[D_INDIRDEP]) {
14635		LIST_FOREACH_SAFE(wk, &ump->softdep_alldeps[D_INDIRDEP],
14636		    wk_all, nextwk) {
14637			indirdep = WK_INDIRDEP(wk);
14638			if ((indirdep->ir_state & (GOINGAWAY | DEPCOMPLETE)) !=
14639			    (GOINGAWAY | DEPCOMPLETE) ||
14640			    !TAILQ_EMPTY(&indirdep->ir_trunc) ||
14641			    !LIST_EMPTY(&indirdep->ir_completehd) ||
14642			    !LIST_EMPTY(&indirdep->ir_writehd) ||
14643			    !LIST_EMPTY(&indirdep->ir_donehd) ||
14644			    !LIST_EMPTY(&indirdep->ir_deplisthd) ||
14645			    indirdep->ir_saveddata != NULL ||
14646			    indirdep->ir_savebp == NULL) {
14647				printf("%s: skipping orphaned indirdep %p\n",
14648				    __FUNCTION__, indirdep);
14649				continue;
14650			}
14651			printf("%s: freeing orphaned indirdep %p\n",
14652			    __FUNCTION__, indirdep);
14653			bp = indirdep->ir_savebp;
14654			indirdep->ir_savebp = NULL;
14655			free_indirdep(indirdep);
14656			FREE_LOCK(ump);
14657			brelse(bp);
14658			while (!TRY_ACQUIRE_LOCK(ump)) {
14659				BO_UNLOCK(bo);
14660				ACQUIRE_LOCK(ump);
14661				FREE_LOCK(ump);
14662				BO_LOCK(bo);
14663			}
14664		}
14665	}
14666
14667	/*
14668	 * Reasons for needing more work before suspend:
14669	 * - Dirty buffers on devvp.
14670	 * - Dependency structures still exist
14671	 * - Softdep activity occurred after start of vnode sync loop
14672	 * - Secondary writes occurred after start of vnode sync loop
14673	 */
14674	error = 0;
14675	if (bo->bo_numoutput > 0 ||
14676	    bo->bo_dirty.bv_cnt > 0 ||
14677	    softdep_depcnt != unlinked ||
14678	    ump->softdep_deps != unlinked ||
14679	    softdep_accdepcnt != ump->softdep_accdeps ||
14680	    secondary_writes != 0 ||
14681	    mp->mnt_secondary_writes != 0 ||
14682	    secondary_accwrites != mp->mnt_secondary_accwrites)
14683		error = EAGAIN;
14684	FREE_LOCK(ump);
14685	BO_UNLOCK(bo);
14686	return (error);
14687}
14688
14689/*
14690 * Get the number of dependency structures for the file system, both
14691 * the current number and the total number allocated.  These will
14692 * later be used to detect that softdep processing has occurred.
14693 */
14694void
14695softdep_get_depcounts(struct mount *mp,
14696		      int *softdep_depsp,
14697		      int *softdep_accdepsp)
14698{
14699	struct ufsmount *ump;
14700
14701	if (MOUNTEDSOFTDEP(mp) == 0) {
14702		*softdep_depsp = 0;
14703		*softdep_accdepsp = 0;
14704		return;
14705	}
14706	ump = VFSTOUFS(mp);
14707	ACQUIRE_LOCK(ump);
14708	*softdep_depsp = ump->softdep_deps;
14709	*softdep_accdepsp = ump->softdep_accdeps;
14710	FREE_LOCK(ump);
14711}
14712
14713/*
14714 * Wait for pending output on a vnode to complete.
14715 */
14716static void
14717drain_output(struct vnode *vp)
14718{
14719
14720	ASSERT_VOP_LOCKED(vp, "drain_output");
14721	(void)bufobj_wwait(&vp->v_bufobj, 0, 0);
14722}
14723
14724/*
14725 * Called whenever a buffer that is being invalidated or reallocated
14726 * contains dependencies. This should only happen if an I/O error has
14727 * occurred. The routine is called with the buffer locked.
14728 */
14729static void
14730softdep_deallocate_dependencies(struct buf *bp)
14731{
14732
14733	if ((bp->b_ioflags & BIO_ERROR) == 0)
14734		panic("softdep_deallocate_dependencies: dangling deps");
14735	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
14736		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
14737	else
14738		printf("softdep_deallocate_dependencies: "
14739		    "got error %d while accessing filesystem\n", bp->b_error);
14740	if (bp->b_error != ENXIO)
14741		panic("softdep_deallocate_dependencies: unrecovered I/O error");
14742}
14743
14744/*
14745 * Function to handle asynchronous write errors in the filesystem.
14746 */
14747static void
14748softdep_error(char *func, int error)
14749{
14750
14751	/* XXX should do something better! */
14752	printf("%s: got error %d while accessing filesystem\n", func, error);
14753}
14754
14755#ifdef DDB
14756
14757/* exported to ffs_vfsops.c */
14758extern void db_print_ffs(struct ufsmount *ump);
14759void
14760db_print_ffs(struct ufsmount *ump)
14761{
14762	db_printf("mp %p (%s) devvp %p\n", ump->um_mountp,
14763	    ump->um_mountp->mnt_stat.f_mntonname, ump->um_devvp);
14764	db_printf("    fs %p ", ump->um_fs);
14765
14766	if (ump->um_softdep != NULL) {
14767		db_printf("su_wl %d su_deps %d su_req %d\n",
14768		    ump->softdep_on_worklist, ump->softdep_deps,
14769		    ump->softdep_req);
14770	} else {
14771		db_printf("su disabled\n");
14772	}
14773}
14774
14775static void
14776worklist_print(struct worklist *wk, int verbose)
14777{
14778
14779	if (!verbose) {
14780		db_printf("%s: %p state 0x%b\n", TYPENAME(wk->wk_type), wk,
14781		    wk->wk_state, PRINT_SOFTDEP_FLAGS);
14782		return;
14783	}
14784	db_printf("worklist: %p type %s state 0x%b next %p\n    ", wk,
14785	    TYPENAME(wk->wk_type), wk->wk_state, PRINT_SOFTDEP_FLAGS,
14786	    LIST_NEXT(wk, wk_list));
14787	db_print_ffs(VFSTOUFS(wk->wk_mp));
14788}
14789
14790static void
14791inodedep_print(struct inodedep *inodedep, int verbose)
14792{
14793
14794	worklist_print(&inodedep->id_list, 0);
14795	db_printf("    fs %p ino %jd inoblk %jd delta %jd nlink %jd\n",
14796	    inodedep->id_fs,
14797	    (intmax_t)inodedep->id_ino,
14798	    (intmax_t)fsbtodb(inodedep->id_fs,
14799	        ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
14800	    (intmax_t)inodedep->id_nlinkdelta,
14801	    (intmax_t)inodedep->id_savednlink);
14802
14803	if (verbose == 0)
14804		return;
14805
14806	db_printf("    bmsafemap %p, mkdiradd %p, inoreflst %p\n",
14807	    inodedep->id_bmsafemap,
14808	    inodedep->id_mkdiradd,
14809	    TAILQ_FIRST(&inodedep->id_inoreflst));
14810	db_printf("    dirremhd %p, pendinghd %p, bufwait %p\n",
14811	    LIST_FIRST(&inodedep->id_dirremhd),
14812	    LIST_FIRST(&inodedep->id_pendinghd),
14813	    LIST_FIRST(&inodedep->id_bufwait));
14814	db_printf("    inowait %p, inoupdt %p, newinoupdt %p\n",
14815	    LIST_FIRST(&inodedep->id_inowait),
14816	    TAILQ_FIRST(&inodedep->id_inoupdt),
14817	    TAILQ_FIRST(&inodedep->id_newinoupdt));
14818	db_printf("    extupdt %p, newextupdt %p, freeblklst %p\n",
14819	    TAILQ_FIRST(&inodedep->id_extupdt),
14820	    TAILQ_FIRST(&inodedep->id_newextupdt),
14821	    TAILQ_FIRST(&inodedep->id_freeblklst));
14822	db_printf("    saveino %p, savedsize %jd, savedextsize %jd\n",
14823	    inodedep->id_savedino1,
14824	    (intmax_t)inodedep->id_savedsize,
14825	    (intmax_t)inodedep->id_savedextsize);
14826}
14827
14828static void
14829newblk_print(struct newblk *nbp)
14830{
14831
14832	worklist_print(&nbp->nb_list, 0);
14833	db_printf("    newblkno %jd\n", (intmax_t)nbp->nb_newblkno);
14834	db_printf("    jnewblk %p, bmsafemap %p, freefrag %p\n",
14835	    &nbp->nb_jnewblk,
14836	    &nbp->nb_bmsafemap,
14837	    &nbp->nb_freefrag);
14838	db_printf("    indirdeps %p, newdirblk %p, jwork %p\n",
14839	    LIST_FIRST(&nbp->nb_indirdeps),
14840	    LIST_FIRST(&nbp->nb_newdirblk),
14841	    LIST_FIRST(&nbp->nb_jwork));
14842}
14843
14844static void
14845allocdirect_print(struct allocdirect *adp)
14846{
14847
14848	newblk_print(&adp->ad_block);
14849	db_printf("    oldblkno %jd, oldsize %ld, newsize %ld\n",
14850	    adp->ad_oldblkno, adp->ad_oldsize, adp->ad_newsize);
14851	db_printf("    offset %d, inodedep %p\n",
14852	    adp->ad_offset, adp->ad_inodedep);
14853}
14854
14855static void
14856allocindir_print(struct allocindir *aip)
14857{
14858
14859	newblk_print(&aip->ai_block);
14860	db_printf("    oldblkno %jd, lbn %jd\n",
14861	    (intmax_t)aip->ai_oldblkno, (intmax_t)aip->ai_lbn);
14862	db_printf("    offset %d, indirdep %p\n",
14863	    aip->ai_offset, aip->ai_indirdep);
14864}
14865
14866static void
14867mkdir_print(struct mkdir *mkdir)
14868{
14869
14870	worklist_print(&mkdir->md_list, 0);
14871	db_printf("    diradd %p, jaddref %p, buf %p\n",
14872		mkdir->md_diradd, mkdir->md_jaddref, mkdir->md_buf);
14873}
14874
14875DB_SHOW_COMMAND(sd_inodedep, db_show_sd_inodedep)
14876{
14877
14878	if (have_addr == 0) {
14879		db_printf("inodedep address required\n");
14880		return;
14881	}
14882	inodedep_print((struct inodedep*)addr, 1);
14883}
14884
14885DB_SHOW_COMMAND(sd_allinodedeps, db_show_sd_allinodedeps)
14886{
14887	struct inodedep_hashhead *inodedephd;
14888	struct inodedep *inodedep;
14889	struct ufsmount *ump;
14890	int cnt;
14891
14892	if (have_addr == 0) {
14893		db_printf("ufsmount address required\n");
14894		return;
14895	}
14896	ump = (struct ufsmount *)addr;
14897	for (cnt = 0; cnt < ump->inodedep_hash_size; cnt++) {
14898		inodedephd = &ump->inodedep_hashtbl[cnt];
14899		LIST_FOREACH(inodedep, inodedephd, id_hash) {
14900			inodedep_print(inodedep, 0);
14901		}
14902	}
14903}
14904
14905DB_SHOW_COMMAND(sd_worklist, db_show_sd_worklist)
14906{
14907
14908	if (have_addr == 0) {
14909		db_printf("worklist address required\n");
14910		return;
14911	}
14912	worklist_print((struct worklist *)addr, 1);
14913}
14914
14915DB_SHOW_COMMAND(sd_workhead, db_show_sd_workhead)
14916{
14917	struct worklist *wk;
14918	struct workhead *wkhd;
14919
14920	if (have_addr == 0) {
14921		db_printf("worklist address required "
14922		    "(for example value in bp->b_dep)\n");
14923		return;
14924	}
14925	/*
14926	 * We often do not have the address of the worklist head but
14927	 * instead a pointer to its first entry (e.g., we have the
14928	 * contents of bp->b_dep rather than &bp->b_dep). But the back
14929	 * pointer of bp->b_dep will point at the head of the list, so
14930	 * we cheat and use that instead. If we are in the middle of
14931	 * a list we will still get the same result, so nothing
14932	 * unexpected will result.
14933	 */
14934	wk = (struct worklist *)addr;
14935	if (wk == NULL)
14936		return;
14937	wkhd = (struct workhead *)wk->wk_list.le_prev;
14938	LIST_FOREACH(wk, wkhd, wk_list) {
14939		switch(wk->wk_type) {
14940		case D_INODEDEP:
14941			inodedep_print(WK_INODEDEP(wk), 0);
14942			continue;
14943		case D_ALLOCDIRECT:
14944			allocdirect_print(WK_ALLOCDIRECT(wk));
14945			continue;
14946		case D_ALLOCINDIR:
14947			allocindir_print(WK_ALLOCINDIR(wk));
14948			continue;
14949		case D_MKDIR:
14950			mkdir_print(WK_MKDIR(wk));
14951			continue;
14952		default:
14953			worklist_print(wk, 0);
14954			continue;
14955		}
14956	}
14957}
14958
14959DB_SHOW_COMMAND(sd_mkdir, db_show_sd_mkdir)
14960{
14961	if (have_addr == 0) {
14962		db_printf("mkdir address required\n");
14963		return;
14964	}
14965	mkdir_print((struct mkdir *)addr);
14966}
14967
14968DB_SHOW_COMMAND(sd_mkdir_list, db_show_sd_mkdir_list)
14969{
14970	struct mkdirlist *mkdirlisthd;
14971	struct mkdir *mkdir;
14972
14973	if (have_addr == 0) {
14974		db_printf("mkdir listhead address required\n");
14975		return;
14976	}
14977	mkdirlisthd = (struct mkdirlist *)addr;
14978	LIST_FOREACH(mkdir, mkdirlisthd, md_mkdirs) {
14979		mkdir_print(mkdir);
14980		if (mkdir->md_diradd != NULL) {
14981			db_printf("    ");
14982			worklist_print(&mkdir->md_diradd->da_list, 0);
14983		}
14984		if (mkdir->md_jaddref != NULL) {
14985			db_printf("    ");
14986			worklist_print(&mkdir->md_jaddref->ja_list, 0);
14987		}
14988	}
14989}
14990
14991DB_SHOW_COMMAND(sd_allocdirect, db_show_sd_allocdirect)
14992{
14993	if (have_addr == 0) {
14994		db_printf("allocdirect address required\n");
14995		return;
14996	}
14997	allocdirect_print((struct allocdirect *)addr);
14998}
14999
15000DB_SHOW_COMMAND(sd_allocindir, db_show_sd_allocindir)
15001{
15002	if (have_addr == 0) {
15003		db_printf("allocindir address required\n");
15004		return;
15005	}
15006	allocindir_print((struct allocindir *)addr);
15007}
15008
15009#endif /* DDB */
15010
15011#endif /* SOFTUPDATES */
15012