/*-
 *  modified for Lites 1.1
 *
 *  Aug 1995, Godmar Back (gback@cs.utah.edu)
 *  University of Utah, Department of Computer Science
 */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.8 (Berkeley) 2/21/94
 * $FreeBSD: stable/11/sys/fs/ext2fs/ext2_alloc.c 330897 2018-03-14 03:19:51Z eadler $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/buf.h>
#include <sys/endian.h>

#include <fs/ext2fs/fs.h>
#include <fs/ext2fs/inode.h>
#include <fs/ext2fs/ext2_mount.h>
#include <fs/ext2fs/ext2fs.h>
#include <fs/ext2fs/ext2_extern.h>

static daddr_t	ext2_alloccg(struct inode *, int, daddr_t, int);
static daddr_t	ext2_clusteralloc(struct inode *, int, daddr_t, int);
static u_long	ext2_dirpref(struct inode *);
static u_long	ext2_hashalloc(struct inode *, int, long, int,
				daddr_t (*)(struct inode *, int, daddr_t,
						int));
static daddr_t	ext2_nodealloccg(struct inode *, int, daddr_t, int);
static daddr_t  ext2_mapsearch(struct m_ext2fs *, char *, daddr_t);

/*
 * Allocate a block in the filesystem.
 *
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *        available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *        inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *        available block is located.
 */
int
ext2_alloc(struct inode *ip, daddr_t lbn, e4fs_daddr_t bpref, int size,
    struct ucred *cred, e4fs_daddr_t *bnp)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	int32_t bno;
	int cg;

	*bnp = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	mtx_assert(EXT2_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->e2fs_bsize || blkoff(fs, size) != 0) {
		vn_printf(ip->i_devvp, "bsize = %lu, size = %d, fs = %s\n",
		    (long unsigned int)fs->e2fs_bsize, size, fs->e2fs_fsmnt);
		panic("ext2_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ext2_alloc: missing credential");
#endif		/* INVARIANTS */
	if (size == fs->e2fs_bsize && fs->e2fs->e2fs_fbcount == 0)
		goto nospace;
	if (cred->cr_uid != 0 &&
	    fs->e2fs->e2fs_fbcount < fs->e2fs->e2fs_rbcount)
		goto nospace;
	if (bpref >= fs->e2fs->e2fs_bcount)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = (daddr_t)ext2_hashalloc(ip, cg, bpref, fs->e2fs_bsize,
	    ext2_alloccg);
	if (bno > 0) {
		/* set next_alloc fields as done in block_getblk */
		ip->i_next_alloc_block = lbn;
		ip->i_next_alloc_goal = bno;

		ip->i_blocks += btodb(fs->e2fs_bsize);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "filesystem full");
	uprintf("\n%s: write failed, filesystem is full\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * Allocate a block for the inode's extended attributes (EA).
 */
daddr_t
ext2_allocfacl(struct inode *ip)
{
	struct m_ext2fs *fs;
	daddr_t facl;

	fs = ip->i_e2fs;

	EXT2_LOCK(ip->i_ump);
	facl = ext2_alloccg(ip, ino_to_cg(fs, ip->i_number), 0, fs->e2fs_bsize);
	if (facl == 0)
		EXT2_UNLOCK(ip->i_ump);

	return (facl);
}

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous are given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible to
 * an fs_rotdelay offset from the end of the allocation for the logical
 * block immediately preceding the current range. If successful, the
 * physical block numbers in the buffer pointers and in the inode are
 * changed to reflect the new allocation. If unsuccessful, the allocation
 * is left unchanged. The success in doing the reallocation is returned.
 * Note that the error return is not reflected back to the user. Rather,
 * the previous block allocation will be used.
 */

static SYSCTL_NODE(_vfs, OID_AUTO, ext2fs, CTLFLAG_RW, 0, "EXT2FS filesystem");

static int doasyncfree = 1;

SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
    "Use asynchronous writes to update block pointers when freeing blocks");

static int doreallocblks = 0;

SYSCTL_INT(_vfs_ext2fs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

int
ext2_reallocblks(struct vop_reallocblks_args *ap)
{
	struct m_ext2fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	uint32_t *bap, *sbap, *ebap;
	struct ext2mount *ump;
	struct cluster_save *buflist;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	e2fs_lbn_t start_lbn, end_lbn;
	int soff;
	e2fs_daddr_t newblk, blkno;
	int i, len, start_lvl, end_lvl, pref, ssize;

	if (doreallocblks == 0)
		return (ENOSPC);

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_e2fs;
	ump = ip->i_ump;

	if (fs->e2fs_contigsumsize <= 0)
		return (ENOSPC);

	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ext2_reallocblks: non-cluster");
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ext2_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ext2_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (u_int *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ext2_reallocblks: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->e2fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (u_int *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster.
	 */
	EXT2_LOCK(ump);
	pref = ext2_blkpref(ip, start_lbn, soff, sbap, 0);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 */
	if ((newblk = (e2fs_daddr_t)ext2_hashalloc(ip, dtog(fs, pref), pref,
	    len, ext2_clusteralloc)) == 0) {
		EXT2_UNLOCK(ump);
		goto fail;
	}
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
	    (uintmax_t)ip->i_number, (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif	/* DEBUG */
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (buflist->bs_children[i]->b_blkno != fsbtodb(fs, *bap))
			panic("ext2_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		printf(" %d,", *bap);
#endif	/* DEBUG */
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ext2_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	printf("\n\tnew:");
#endif	/* DEBUG */
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->e2fs_fpb) {
		ext2_blkfree(ip, dbtofsb(fs, buflist->bs_children[i]->b_blkno),
		    fs->e2fs_bsize);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef DEBUG
		printf(" %d,", blkno);
#endif	/* DEBUG */
	}
#ifdef DEBUG
	printf("\n");
#endif	/* DEBUG */
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 */
int
ext2_valloc(struct vnode *pvp, int mode, struct ucred *cred, struct vnode **vpp)
{
	struct timespec ts;
	struct inode *pip;
	struct m_ext2fs *fs;
	struct inode *ip;
	struct ext2mount *ump;
	ino_t ino, ipref;
	int i, error, cg;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;

	EXT2_LOCK(ump);
	if (fs->e2fs->e2fs_ficount == 0)
		goto noinodes;
	/*
	 * If it is a directory then obtain a cylinder group based on
	 * ext2_dirpref else obtain it using ino_to_cg. The preferred inode is
	 * always the next inode.
	 */
	if ((mode & IFMT) == IFDIR) {
		cg = ext2_dirpref(pip);
		if (fs->e2fs_contigdirs[cg] < 255)
			fs->e2fs_contigdirs[cg]++;
	} else {
		cg = ino_to_cg(fs, pip->i_number);
		if (fs->e2fs_contigdirs[cg] > 0)
			fs->e2fs_contigdirs[cg]--;
	}
	ipref = cg * fs->e2fs->e2fs_ipg + 1;
	ino = (ino_t)ext2_hashalloc(pip, cg, (long)ipref, mode, ext2_nodealloccg);

	if (ino == 0)
		goto noinodes;
	error = VFS_VGET(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		ext2_vfree(pvp, ino, mode);
		return (error);
	}
	ip = VTOI(*vpp);

	/*
	 * The question is whether using VGET was such a good idea at all:
	 * Linux doesn't read the old inode in when it is allocating a
	 * new one. I will set at least i_size and i_blocks to zero.
	 */
	ip->i_flag = 0;
	ip->i_size = 0;
	ip->i_blocks = 0;
	ip->i_mode = 0;
	ip->i_flags = 0;
	/* now we want to make sure that the block pointers are zeroed out */
	for (i = 0; i < NDADDR; i++)
		ip->i_db[i] = 0;
	for (i = 0; i < NIADDR; i++)
		ip->i_ib[i] = 0;

	/*
	 * Set up a new generation number for this inode.
	 * Avoid zero values.
	 */
	do {
		ip->i_gen = arc4random();
	} while (ip->i_gen == 0);

	vfs_timestamp(&ts);
	ip->i_birthtime = ts.tv_sec;
	ip->i_birthnsec = ts.tv_nsec;

/*
printf("ext2_valloc: allocated inode %d\n", ino);
*/
	return (0);
noinodes:
	EXT2_UNLOCK(ump);
	ext2_fserr(fs, cred->cr_uid, "out of inodes");
	uprintf("\n%s: create/symlink failed, no inodes free\n", fs->e2fs_fsmnt);
	return (ENOSPC);
}

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static u_long
ext2_dirpref(struct inode *pip)
{
	struct m_ext2fs *fs;
	int cg, prefcg, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int dirsize, maxcontigdirs;

	mtx_assert(EXT2_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_e2fs;

	avgifree = fs->e2fs->e2fs_ficount / fs->e2fs_gcount;
	avgbfree = fs->e2fs->e2fs_fbcount / fs->e2fs_gcount;
	avgndir = fs->e2fs_total_dir / fs->e2fs_gcount;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ext2fs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->e2fs_gcount;
		mincg = prefcg;
		minndir = fs->e2fs_ipg;
		for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
			if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
			    fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
			    fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->e2fs_gd[cg].ext2bgd_ndirs < minndir &&
			    fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree &&
			    fs->e2fs_gd[cg].ext2bgd_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->e2fs_gd[cg].ext2bgd_ndirs;
			}
		return (mincg);
	}
	/*
	 * Count various limits which are used for
	 * optimal allocation of a directory inode.
	 */
	maxndir = min(avgndir + fs->e2fs_ipg / 16, fs->e2fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->e2fs_fsize * fs->e2fs_fpg;
	dirsize = AVGDIRSIZE;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->e2fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	maxcontigdirs = min((avgbfree * fs->e2fs_bsize) / dirsize, 255);
	maxcontigdirs = min(maxcontigdirs, fs->e2fs_ipg / AFPDIR);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
		    fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
		    fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_ndirs < maxndir &&
		    fs->e2fs_gd[cg].ext2bgd_nifree >= minifree &&
		    fs->e2fs_gd[cg].ext2bgd_nbfree >= minbfree) {
			if (fs->e2fs_contigdirs[cg] < maxcontigdirs)
				return (cg);
		}
	/*
	 * This is a backstop when we have a deficit in space.
	 */
	for (cg = prefcg; cg < fs->e2fs_gcount; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
			return (cg);
	for (cg = 0; cg < prefcg; cg++)
		if (fs->e2fs_gd[cg].ext2bgd_nifree >= avgifree)
			break;
	return (cg);
}

/*
 * Select the desired position for the next block in a file.
 *
 * we try to mimic what Remy does in inode_getblk/block_getblk
 *
 * we note: blocknr == 0 means that we're about to allocate either
 * a direct block or a pointer block at the first level of indirection
 * (In other words, stuff that will go in i_db[] or i_ib[])
 *
 * blocknr != 0 means that we're allocating a block that is none
 * of the above. Then, blocknr tells us the number of the block
 * that will hold the pointer.
 */
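/*
 * In short: if we are allocating the logical block we expected next,
 * reuse the goal cached in i_next_alloc_goal; otherwise stay close to
 * the most recent block recorded in the supplied block array; otherwise
 * fall back to the pointer block itself, or to the first block of the
 * inode's block group.
 */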
e4fs_daddr_t
ext2_blkpref(struct inode *ip, e2fs_lbn_t lbn, int indx, e2fs_daddr_t *bap,
    e2fs_daddr_t blocknr)
{
	int tmp;

	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);

	/*
	 * If the next block is actually what we thought it is, then set the
	 * goal to what we thought it should be.
	 */
	if (ip->i_next_alloc_block == lbn && ip->i_next_alloc_goal != 0)
		return (ip->i_next_alloc_goal);

	/*
	 * Now check whether we were provided with an array that basically
	 * tells us previous blocks to which we want to stay close.
	 */
	if (bap)
		for (tmp = indx - 1; tmp >= 0; tmp--)
			if (bap[tmp])
				return (bap[tmp]);

	/*
	 * Else, let's fall back to the blocknr or, if there is none, follow
	 * the rule that a block should be allocated near its inode.
	 */
	return (blocknr ? blocknr :
	    (e2fs_daddr_t)(ip->i_block_group *
	    EXT2_BLOCKS_PER_GROUP(ip->i_e2fs)) +
	    ip->i_e2fs->e2fs->e2fs_first_dblock);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 */
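/*
 * For example, with 8 cylinder groups and a preferred group of 5, the
 * probe order below is: group 5 first, then 6, 0 and 4 from the
 * quadratic rehash (cumulative offsets +1, +3, +7 modulo 8), and
 * finally the linear brute force scan 7, 0, 1, 2, 3, 4.
 */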
static u_long
ext2_hashalloc(struct inode *ip, int cg, long pref, int size,
    daddr_t (*allocator) (struct inode *, int, daddr_t, int))
{
	struct m_ext2fs *fs;
	ino_t result;
	int i, icg = cg;

	mtx_assert(EXT2_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_e2fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->e2fs_gcount; i *= 2) {
		cg += i;
		if (cg >= fs->e2fs_gcount)
			cg -= fs->e2fs_gcount;
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->e2fs_gcount;
	for (i = 2; i < fs->e2fs_gcount; i++) {
		result = (*allocator)(ip, cg, 0, size);
		if (result)
			return (result);
		cg++;
		if (cg == fs->e2fs_gcount)
			cg = 0;
	}
	return (0);
}

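/*
 * Return the number of group descriptor blocks that are replicated in
 * cylinder group cg (zero when the group carries no backup descriptors).
 */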
static unsigned long
ext2_cg_num_gdb(struct m_ext2fs *fs, int cg)
{
	int gd_per_block, metagroup, first, last;

	gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd);
	metagroup = cg / gd_per_block;
	first = metagroup * gd_per_block;
	last = first + gd_per_block - 1;

	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) ||
	    metagroup < fs->e2fs->e3fs_first_meta_bg) {
		if (!ext2_cg_has_sb(fs, cg))
			return (0);
		if (EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG))
			return (fs->e2fs->e3fs_first_meta_bg);
		return (fs->e2fs_gdbcount);
	}

	if (cg == first || cg == first + 1 || cg == last)
		return (1);
	return (0);
}

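/*
 * Return the number of blocks at the start of cylinder group cg that are
 * occupied by metadata: the superblock backup plus the group descriptor
 * and reserved GDT blocks, when the group carries them.
 */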
static int
ext2_num_base_meta_blocks(struct m_ext2fs *fs, int cg)
{
	int num, gd_per_block;

	gd_per_block = fs->e2fs_bsize / sizeof(struct ext2_gd);
	num = ext2_cg_has_sb(fs, cg);

	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_META_BG) ||
	    cg < fs->e2fs->e3fs_first_meta_bg * gd_per_block) {
		if (num) {
			num += ext2_cg_num_gdb(fs, cg);
			num += fs->e2fs->e2fs_reserved_ngdb;
		}
	} else {
		num += ext2_cg_num_gdb(fs, cg);
	}

	return (num);
}

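/*
 * Map a filesystem block number to the number of the cylinder group that
 * contains it.
 */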
static int
ext2_get_cg_number(struct m_ext2fs *fs, daddr_t blk)
{
	int cg;

	if (fs->e2fs->e2fs_bpg == fs->e2fs_bsize * 8)
		cg = (blk - fs->e2fs->e2fs_first_dblock) / (fs->e2fs_bsize * 8);
	else
		cg = blk - fs->e2fs->e2fs_first_dblock;

	return (cg);
}

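/*
 * Mark the bits in the range [start_bit, end_bit) as in use: set the bits
 * up to the next byte boundary one at a time, then fill the remaining
 * whole bytes with 0xff.
 */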
static void
ext2_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		setbit(bitmap, i);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}

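/*
 * Build the block bitmap for a group that is still flagged
 * EXT2_BG_BLOCK_UNINIT: mark the group's metadata blocks (superblock and
 * group descriptor copies, block and inode bitmaps, inode table) as
 * allocated, pad the unused tail of the bitmap, and clear the flag.
 */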
static int
ext2_cg_block_bitmap_init(struct m_ext2fs *fs, int cg, struct buf *bp)
{
	int bit, bit_max, inodes_per_block;
	uint32_t start, tmp;

	if (!EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM) ||
	    !(fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_BLOCK_UNINIT))
		return (0);

	memset(bp->b_data, 0, fs->e2fs_bsize);

	bit_max = ext2_num_base_meta_blocks(fs, cg);
	if ((bit_max >> 3) >= fs->e2fs_bsize)
		return (EINVAL);

	for (bit = 0; bit < bit_max; bit++)
		setbit(bp->b_data, bit);

	start = cg * fs->e2fs->e2fs_bpg + fs->e2fs->e2fs_first_dblock;

	/* Set bits for block and inode bitmaps, and inode table */
	tmp = fs->e2fs_gd[cg].ext2bgd_b_bitmap;
	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
	    tmp == ext2_get_cg_number(fs, cg))
		setbit(bp->b_data, tmp - start);

	tmp = fs->e2fs_gd[cg].ext2bgd_i_bitmap;
	if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
	    tmp == ext2_get_cg_number(fs, cg))
		setbit(bp->b_data, tmp - start);

	tmp = fs->e2fs_gd[cg].ext2bgd_i_tables;
	inodes_per_block = fs->e2fs_bsize / EXT2_INODE_SIZE(fs);
	while (tmp < fs->e2fs_gd[cg].ext2bgd_i_tables +
	    fs->e2fs->e2fs_ipg / inodes_per_block) {
		if (!EXT2_HAS_INCOMPAT_FEATURE(fs, EXT2F_INCOMPAT_FLEX_BG) ||
		    tmp == ext2_get_cg_number(fs, cg))
			setbit(bp->b_data, tmp - start);
		tmp++;
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * the blocksize * 8 (which is the size of the bitmap), set the
	 * rest of the block bitmap to 1.
	 */
	ext2_mark_bitmap_end(fs->e2fs->e2fs_bpg, fs->e2fs_bsize * 8,
	    bp->b_data);

	/* Clean the flag */
	fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_BLOCK_UNINIT;

	return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ext2_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	daddr_t bno, runstart, runlen;
	int bit, loc, end, error, start;
	char *bbp;

	/* XXX ondisk32 */
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) {
		error = ext2_cg_block_bitmap_init(fs, cg, bp);
		if (error) {
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	if (fs->e2fs_gd[cg].ext2bgd_nbfree == 0) {
		/*
		 * Another thread allocated the last block in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	bbp = (char *)bp->b_data;

	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0) {
		bpref = dtogd(fs, bpref);
		/*
		 * if the requested block is available, use it
		 */
		if (isclr(bbp, bpref)) {
			bno = bpref;
			goto gotit;
		}
	}
	/*
	 * no blocks in the requested cylinder, so take next
	 * available one in this cylinder group.
	 * first try to get 8 contiguous blocks, then fall back to a single
	 * block.
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	end = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
retry:
	runlen = 0;
	runstart = 0;
	for (loc = start; loc < end; loc++) {
		if (bbp[loc] == (char)0xff) {
			runlen = 0;
			continue;
		}

		/* Start of a run, find the number of high clear bits. */
		if (runlen == 0) {
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		} else if (bbp[loc] == 0) {
			/* Continue a run. */
			runlen += NBBY;
		} else {
			/*
			 * Finish the current run.  If it isn't long
			 * enough, start a new one.
			 */
			bit = ffs(bbp[loc]) - 1;
			runlen += bit;
			if (runlen >= 8) {
				bno = runstart;
				goto gotit;
			}

			/* Run was too short, start a new one. */
			bit = fls(bbp[loc]);
			runlen = NBBY - bit;
			runstart = loc * NBBY + bit;
		}

		/* If the current run is long enough, use it. */
		if (runlen >= 8) {
			bno = runstart;
			goto gotit;
		}
	}
	if (start != 0) {
		end = start;
		start = 0;
		goto retry;
	}
	bno = ext2_mapsearch(fs, bbp, bpref);
	if (bno < 0) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
gotit:
#ifdef INVARIANTS
	if (isset(bbp, bno)) {
		printf("ext2fs_alloccgblk: cg=%d bno=%jd fs=%s\n",
		    cg, (intmax_t)bno, fs->e2fs_fsmnt);
		panic("ext2fs_alloccg: dup alloc");
	}
#endif
	setbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, -1);
	fs->e2fs->e2fs_fbcount--;
	fs->e2fs_gd[cg].ext2bgd_nbfree--;
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);
}

/*
 * Determine whether a cluster can be allocated.
 */
static daddr_t
ext2_clusteralloc(struct inode *ip, int cg, daddr_t bpref, int len)
{
	struct m_ext2fs *fs;
	struct ext2mount *ump;
	struct buf *bp;
	char *bbp;
	int bit, error, got, i, loc, run;
	int32_t *lp;
	daddr_t bno;

	fs = ip->i_e2fs;
	ump = ip->i_ump;

	if (fs->e2fs_maxcluster[cg] < len)
		return (0);

	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error)
		goto fail_lock;

	bbp = (char *)bp->b_data;
	EXT2_LOCK(ump);
	/*
	 * Check to see if a cluster of the needed size (or bigger) is
	 * available in this cylinder group.
	 */
	lp = &fs->e2fs_clustersum[cg].cs_sum[len];
	for (i = len; i <= fs->e2fs_contigsumsize; i++)
		if (*lp++ > 0)
			break;
	if (i > fs->e2fs_contigsumsize) {
		/*
		 * Update the cluster summary information to reflect
		 * the true maximum-sized cluster so that future cluster
		 * allocation requests can avoid reading the bitmap only
		 * to find no cluster.
		 */
		lp = &fs->e2fs_clustersum[cg].cs_sum[len - 1];
		for (i = len - 1; i > 0; i--)
			if (*lp-- > 0)
				break;
		fs->e2fs_maxcluster[cg] = i;
		goto fail;
	}
	EXT2_UNLOCK(ump);

	/* Search the bitmap to find a big enough cluster like in FFS. */
	if (dtog(fs, bpref) != cg)
		bpref = 0;
	if (bpref != 0)
		bpref = dtogd(fs, bpref);
	loc = bpref / NBBY;
	bit = 1 << (bpref % NBBY);
	for (run = 0, got = bpref; got < fs->e2fs->e2fs_fpg; got++) {
		if ((bbp[loc] & bit) != 0)
			run = 0;
		else {
			run++;
			if (run == len)
				break;
		}
		if ((got & (NBBY - 1)) != (NBBY - 1))
			bit <<= 1;
		else {
			loc++;
			bit = 1;
		}
	}

	if (got >= fs->e2fs->e2fs_fpg)
		goto fail_lock;

	/* Allocate the cluster that we found. */
	for (i = 1; i < len; i++)
		if (!isclr(bbp, got - run + i))
			panic("ext2_clusteralloc: map mismatch");

	bno = got - run + 1;
	if (bno >= fs->e2fs->e2fs_fpg)
		panic("ext2_clusteralloc: allocated out of group");

	EXT2_LOCK(ump);
	for (i = 0; i < len; i += fs->e2fs_fpb) {
		setbit(bbp, bno + i);
		ext2_clusteracct(fs, bbp, cg, bno + i, -1);
		fs->e2fs->e2fs_fbcount--;
		fs->e2fs_gd[cg].ext2bgd_nbfree--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);

	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_fpg + fs->e2fs->e2fs_first_dblock + bno);

fail_lock:
	EXT2_LOCK(ump);
fail:
	brelse(bp);
	return (0);
}

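/*
 * Zero out the not-yet-used blocks of the group's on-disk inode table and
 * mark the group EXT2_BG_INODE_ZEROED.
 */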
static int
ext2_zero_inode_table(struct inode *ip, int cg)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	int i, all_blks, used_blks;

	fs = ip->i_e2fs;

	if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_ZEROED)
		return (0);

	all_blks = fs->e2fs->e2fs_inode_size * fs->e2fs->e2fs_ipg /
	    fs->e2fs_bsize;

	used_blks = howmany(fs->e2fs->e2fs_ipg -
	    fs->e2fs_gd[cg].ext4bgd_i_unused,
	    fs->e2fs_bsize / EXT2_INODE_SIZE(fs));

	for (i = 0; i < all_blks - used_blks; i++) {
		bp = getblk(ip->i_devvp, fsbtodb(fs,
		    fs->e2fs_gd[cg].ext2bgd_i_tables + used_blks + i),
		    fs->e2fs_bsize, 0, 0, 0);
		if (!bp)
			return (EIO);

		vfs_bio_bzero_buf(bp, 0, fs->e2fs_bsize);
		bawrite(bp);
	}

	fs->e2fs_gd[cg].ext4bgd_flags |= EXT2_BG_INODE_ZEROED;

	return (0);
}

/*
 * Determine whether an inode can be allocated.
 *
 * Check to see if an inode is available, and if it is,
 * allocate it in the specified cylinder group.
 */
static daddr_t
ext2_nodealloccg(struct inode *ip, int cg, daddr_t ipref, int mode)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int error, start, len;
	char *ibp, *loc;

	ipref--;	/* to avoid a lot of (ipref - 1) */
	if (ipref == -1)
		ipref = 0;
	fs = ip->i_e2fs;
	ump = ip->i_ump;
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0)
		return (0);
	EXT2_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs,
	    fs->e2fs_gd[cg].ext2bgd_i_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM)) {
		if (fs->e2fs_gd[cg].ext4bgd_flags & EXT2_BG_INODE_UNINIT) {
			memset(bp->b_data, 0, fs->e2fs_bsize);
			fs->e2fs_gd[cg].ext4bgd_flags &= ~EXT2_BG_INODE_UNINIT;
		}
		error = ext2_zero_inode_table(ip, cg);
		if (error) {
			brelse(bp);
			EXT2_LOCK(ump);
			return (0);
		}
	}
	if (fs->e2fs_gd[cg].ext2bgd_nifree == 0) {
		/*
		 * Another thread allocated the last i-node in this
		 * group while we were waiting for the buffer.
		 */
		brelse(bp);
		EXT2_LOCK(ump);
		return (0);
	}
	ibp = (char *)bp->b_data;
	if (ipref) {
		ipref %= fs->e2fs->e2fs_ipg;
		if (isclr(ibp, ipref))
			goto gotit;
	}
	start = ipref / NBBY;
	len = howmany(fs->e2fs->e2fs_ipg - ipref, NBBY);
	loc = memcchr(&ibp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&ibp[start], 0xff, len);
		if (loc == NULL) {
			printf("cg = %d, ipref = %lld, fs = %s\n",
			    cg, (long long)ipref, fs->e2fs_fsmnt);
			panic("ext2fs_nodealloccg: map corrupted");
			/* NOTREACHED */
		}
	}
	ipref = (loc - ibp) * NBBY + ffs(~*loc) - 1;
gotit:
	setbit(ibp, ipref);
	EXT2_LOCK(ump);
	fs->e2fs_gd[cg].ext2bgd_nifree--;
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM))
		fs->e2fs_gd[cg].ext4bgd_i_unused--;
	fs->e2fs->e2fs_ficount--;
	fs->e2fs_fmod = 1;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs++;
		fs->e2fs_total_dir++;
	}
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (cg * fs->e2fs->e2fs_ipg + ipref + 1);
}

/*
 * Free a block or fragment.
 */
void
ext2_blkfree(struct inode *ip, e4fs_daddr_t bno, long size)
{
	struct m_ext2fs *fs;
	struct buf *bp;
	struct ext2mount *ump;
	int cg, error;
	char *bbp;

	fs = ip->i_e2fs;
	ump = ip->i_ump;
	cg = dtog(fs, bno);
	if ((u_int)bno >= fs->e2fs->e2fs_bcount) {
		printf("bad block %lld, ino %ju\n", (long long)bno,
		    (uintmax_t)ip->i_number);
		ext2_fserr(fs, ip->i_uid, "bad block");
		return;
	}
	error = bread(ip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_b_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return;
	}
	bbp = (char *)bp->b_data;
	bno = dtogd(fs, bno);
	if (isclr(bbp, bno)) {
		printf("block = %lld, fs = %s\n",
		    (long long)bno, fs->e2fs_fsmnt);
		panic("ext2_blkfree: freeing free block");
	}
	clrbit(bbp, bno);
	EXT2_LOCK(ump);
	ext2_clusteracct(fs, bbp, cg, bno, 1);
	fs->e2fs->e2fs_fbcount++;
	fs->e2fs_gd[cg].ext2bgd_nbfree++;
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
}

/*
 * Free an inode.
 */
int
ext2_vfree(struct vnode *pvp, ino_t ino, int mode)
{
	struct m_ext2fs *fs;
	struct inode *pip;
	struct buf *bp;
	struct ext2mount *ump;
	int error, cg;
	char *ibp;

	pip = VTOI(pvp);
	fs = pip->i_e2fs;
	ump = pip->i_ump;
	if ((u_int)ino > fs->e2fs_ipg * fs->e2fs_gcount)
		panic("ext2_vfree: range: devvp = %p, ino = %ju, fs = %s",
		    pip->i_devvp, (uintmax_t)ino, fs->e2fs_fsmnt);

	cg = ino_to_cg(fs, ino);
	error = bread(pip->i_devvp,
	    fsbtodb(fs, fs->e2fs_gd[cg].ext2bgd_i_bitmap),
	    (int)fs->e2fs_bsize, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (0);
	}
	ibp = (char *)bp->b_data;
	ino = (ino - 1) % fs->e2fs->e2fs_ipg;
	if (isclr(ibp, ino)) {
		printf("ino = %llu, fs = %s\n",
		    (unsigned long long)ino, fs->e2fs_fsmnt);
		if (fs->e2fs_ronly == 0)
			panic("ext2_vfree: freeing free inode");
	}
	clrbit(ibp, ino);
	EXT2_LOCK(ump);
	fs->e2fs->e2fs_ficount++;
	fs->e2fs_gd[cg].ext2bgd_nifree++;
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_GDT_CSUM))
		fs->e2fs_gd[cg].ext4bgd_i_unused++;
	if ((mode & IFMT) == IFDIR) {
		fs->e2fs_gd[cg].ext2bgd_ndirs--;
		fs->e2fs_total_dir--;
	}
	fs->e2fs_fmod = 1;
	EXT2_UNLOCK(ump);
	bdwrite(bp);
	return (0);
}

/*
 * Find a block in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block if none are
 * available.
 */
static daddr_t
ext2_mapsearch(struct m_ext2fs *fs, char *bbp, daddr_t bpref)
{
	char *loc;
	int start, len;

	/*
	 * find the fragment by searching through the free block
	 * map for an appropriate bit pattern
	 */
	if (bpref)
		start = dtogd(fs, bpref) / NBBY;
	else
		start = 0;
	len = howmany(fs->e2fs->e2fs_fpg, NBBY) - start;
	loc = memcchr(&bbp[start], 0xff, len);
	if (loc == NULL) {
		len = start + 1;
		start = 0;
		loc = memcchr(&bbp[start], 0xff, len);
		if (loc == NULL) {
			printf("start = %d, len = %d, fs = %s\n",
			    start, len, fs->e2fs_fsmnt);
			panic("ext2_mapsearch: map corrupted");
			/* NOTREACHED */
		}
	}
	return ((loc - bbp) * NBBY + ffs(~*loc) - 1);
}

/*
 * Fserr prints the name of a filesystem with an error diagnostic.
 *
 * The form of the error message is:
 *	fs: error message
 */
void
ext2_fserr(struct m_ext2fs *fs, uid_t uid, char *cp)
{

	log(LOG_ERR, "uid %u on %s: %s\n", uid, fs->e2fs_fsmnt, cp);
}

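/*
 * Return 1 if cylinder group cg holds a copy of the superblock, 0 otherwise.
 * With the sparse superblock feature only groups 0, 1 and the powers of
 * 3, 5 and 7 (3, 5, 7, 9, 25, 27, 49, ...) carry a backup copy.
 */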
int
ext2_cg_has_sb(struct m_ext2fs *fs, int cg)
{
	int a3, a5, a7;

	if (cg == 0)
		return (1);

	if (EXT2_HAS_COMPAT_FEATURE(fs, EXT2F_COMPAT_SPARSESUPER2)) {
		if (cg == fs->e2fs->e4fs_backup_bgs[0] ||
		    cg == fs->e2fs->e4fs_backup_bgs[1])
			return (1);
		return (0);
	}

	if ((cg <= 1) ||
	    !EXT2_HAS_RO_COMPAT_FEATURE(fs, EXT2F_ROCOMPAT_SPARSESUPER))
		return (1);

	if (!(cg & 1))
		return (0);

	for (a3 = 3, a5 = 5, a7 = 7;
	    a3 <= cg || a5 <= cg || a7 <= cg;
	    a3 *= 3, a5 *= 5, a7 *= 7)
		if (cg == a3 || cg == a5 || cg == a7)
			return (1);
	return (0);
}