/*-
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_alloc.c	8.18 (Berkeley) 5/26/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/ufs/ffs/ffs_alloc.c 298527 2016-04-24 03:11:52Z pfg $");

#include "opt_quota.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <security/audit/audit.h>

#include <geom/geom.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufsmount.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ffs/softdep.h>

typedef ufs2_daddr_t allocfcn_t(struct inode *ip, u_int cg, ufs2_daddr_t bpref,
				  int size, int rsize);

static ufs2_daddr_t ffs_alloccg(struct inode *, u_int, ufs2_daddr_t, int, int);
static ufs2_daddr_t
	      ffs_alloccgblk(struct inode *, struct buf *, ufs2_daddr_t, int);
static void	ffs_blkfree_cg(struct ufsmount *, struct fs *,
		    struct vnode *, ufs2_daddr_t, long, ino_t,
		    struct workhead *);
static void	ffs_blkfree_trim_completed(struct bio *);
static void	ffs_blkfree_trim_task(void *ctx, int pending __unused);
#ifdef INVARIANTS
static int	ffs_checkblk(struct inode *, ufs2_daddr_t, long);
#endif
static ufs2_daddr_t ffs_clusteralloc(struct inode *, u_int, ufs2_daddr_t, int);
static ino_t	ffs_dirpref(struct inode *);
static ufs2_daddr_t ffs_fragextend(struct inode *, u_int, ufs2_daddr_t,
		    int, int);
static ufs2_daddr_t ffs_hashalloc
		(struct inode *, u_int, ufs2_daddr_t, int, int, allocfcn_t *);
static ufs2_daddr_t ffs_nodealloccg(struct inode *, u_int, ufs2_daddr_t, int,
		    int);
static ufs1_daddr_t ffs_mapsearch(struct fs *, struct cg *, ufs2_daddr_t, int);
static int	ffs_reallocblks_ufs1(struct vop_reallocblks_args *);
static int	ffs_reallocblks_ufs2(struct vop_reallocblks_args *);

/*
 * Allocate a block in the filesystem.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
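/*
 * Editor's note (illustrative sketch, not part of the original source):
 * for a hypothetical filesystem with fs_bsize = 16384 and fs_fsize = 2048,
 * the legal values of the "size" argument below are 2048, 4096, ..., 16384,
 * i.e. any multiple of the fragment size not exceeding the block size:
 *
 *	int ok = (size % fs_fsize == 0) && (size <= fs_bsize);
 *
 * which is exactly what the fragoff()/fs_bsize INVARIANTS check at the
 * top of ffs_alloc() enforces.
 */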
int
ffs_alloc(ip, lbn, bpref, size, flags, cred, bnp)
	struct inode *ip;
	ufs2_daddr_t lbn, bpref;
	int size, flags;
	struct ucred *cred;
	ufs2_daddr_t *bnp;
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t bno;
	u_int cg, reclaimed;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;
#ifdef QUOTA
	int error;
#endif

	*bnp = 0;
	fs = ip->i_fs;
	ump = ip->i_ump;
	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
		printf("dev = %s, bsize = %ld, size = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, size,
		    fs->fs_fsmnt);
		panic("ffs_alloc: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_alloc: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	error = chkdq(ip, btodb(size), cred, 0);
	if (error)
		return (error);
	UFS_LOCK(ump);
#endif
	if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
		goto nospace;
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, size) < 0)
		goto nospace;
	if (bpref >= fs->fs_size)
		bpref = 0;
	if (bpref == 0)
		cg = ino_to_cg(fs, ip->i_number);
	else
		cg = dtog(fs, bpref);
	bno = ffs_hashalloc(ip, cg, bpref, size, size, ffs_alloccg);
	if (bno > 0) {
		delta = btodb(size);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		*bnp = bno;
		return (0);
	}
nospace:
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(size), cred, FORCE);
	UFS_LOCK(ump);
#endif
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, ITOV(ip), cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
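
/*
 * Editor's note (illustrative, not part of the original source): note
 * the quota symmetry in ffs_alloc() above.  The quota is charged
 * optimistically before the block search:
 *
 *	error = chkdq(ip, btodb(size), cred, 0);
 *
 * and refunded if no block could be found:
 *
 *	(void) chkdq(ip, -btodb(size), cred, FORCE);
 *
 * where FORCE ensures the refund is applied unconditionally.
 */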

/*
 * Reallocate a fragment to a bigger size
 *
 * The number and size of the old block is given, and a preference
 * and new size is also specified. The allocator attempts to extend
 * the original block. Failing that, the regular block allocator is
 * invoked to get an appropriate block.
 */
int
ffs_realloccg(ip, lbprev, bprev, bpref, osize, nsize, flags, cred, bpp)
	struct inode *ip;
	ufs2_daddr_t lbprev;
	ufs2_daddr_t bprev;
	ufs2_daddr_t bpref;
	int osize, nsize, flags;
	struct ucred *cred;
	struct buf **bpp;
{
	struct vnode *vp;
	struct fs *fs;
	struct buf *bp;
	struct ufsmount *ump;
	u_int cg, request, reclaimed;
	int error, gbflags;
	ufs2_daddr_t bno;
	static struct timeval lastfail;
	static int curfail;
	int64_t delta;

	vp = ITOV(ip);
	fs = ip->i_fs;
	bp = NULL;
	ump = ip->i_ump;
	gbflags = (flags & BA_UNMAPPED) != 0 ? GB_UNMAPPED : 0;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
#ifdef INVARIANTS
	if (vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_realloccg: allocation on suspended filesystem");
	if ((u_int)osize > fs->fs_bsize || fragoff(fs, osize) != 0 ||
	    (u_int)nsize > fs->fs_bsize || fragoff(fs, nsize) != 0) {
		printf(
		"dev = %s, bsize = %ld, osize = %d, nsize = %d, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, osize,
		    nsize, fs->fs_fsmnt);
		panic("ffs_realloccg: bad size");
	}
	if (cred == NOCRED)
		panic("ffs_realloccg: missing credential");
#endif /* INVARIANTS */
	reclaimed = 0;
retry:
	if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0) &&
	    freespace(fs, fs->fs_minfree) - numfrags(fs, nsize - osize) < 0) {
		goto nospace;
	}
	if (bprev == 0) {
		printf("dev = %s, bsize = %ld, bprev = %jd, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_bsize, (intmax_t)bprev,
		    fs->fs_fsmnt);
		panic("ffs_realloccg: bad bprev");
	}
	UFS_UNLOCK(ump);
	/*
	 * Allocate the extra space in the buffer.
	 */
	error = bread_gb(vp, lbprev, osize, NOCRED, gbflags, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}

	if (bp->b_blkno == bp->b_lblkno) {
		if (lbprev >= NDADDR)
			panic("ffs_realloccg: lbprev out of range");
		bp->b_blkno = fsbtodb(fs, bprev);
	}

#ifdef QUOTA
	error = chkdq(ip, btodb(nsize - osize), cred, 0);
	if (error) {
		brelse(bp);
		return (error);
	}
#endif
	/*
	 * Check for extension in the existing location.
	 */
	*bpp = NULL;
	cg = dtog(fs, bprev);
	UFS_LOCK(ump);
	bno = ffs_fragextend(ip, cg, bprev, osize, nsize);
	if (bno) {
		if (bp->b_blkno != fsbtodb(fs, bno))
			panic("ffs_realloccg: bad blockno");
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
	/*
	 * Allocate a new disk location.
	 */
	if (bpref >= fs->fs_size)
		bpref = 0;
	switch ((int)fs->fs_optim) {
	case FS_OPTSPACE:
		/*
		 * Allocate an exact sized fragment. Although this makes
		 * best use of space, we will waste time relocating it if
		 * the file continues to grow. If the fragmentation is
		 * less than half of the minimum free reserve, we choose
		 * to begin optimizing for time.
		 */
		request = nsize;
		if (fs->fs_minfree <= 5 ||
		    fs->fs_cstotal.cs_nffree >
		    (off_t)fs->fs_dsize * fs->fs_minfree / (2 * 100))
			break;
		log(LOG_NOTICE, "%s: optimization changed from SPACE to TIME\n",
			fs->fs_fsmnt);
		fs->fs_optim = FS_OPTTIME;
		break;
	case FS_OPTTIME:
		/*
		 * At this point we have discovered a file that is trying to
		 * grow a small fragment to a larger fragment. To save time,
		 * we allocate a full sized block, then free the unused portion.
		 * If the file continues to grow, the `ffs_fragextend' call
		 * above will be able to grow it in place without further
		 * copying. If aberrant programs cause disk fragmentation to
		 * grow within 2% of the free reserve, we choose to begin
		 * optimizing for space.
		 */
		request = fs->fs_bsize;
		if (fs->fs_cstotal.cs_nffree <
		    (off_t)fs->fs_dsize * (fs->fs_minfree - 2) / 100)
			break;
		log(LOG_NOTICE, "%s: optimization changed from TIME to SPACE\n",
		    fs->fs_fsmnt);
		fs->fs_optim = FS_OPTSPACE;
		break;
	default:
		printf("dev = %s, optim = %ld, fs = %s\n",
		    devtoname(ip->i_dev), (long)fs->fs_optim, fs->fs_fsmnt);
		panic("ffs_realloccg: bad optim");
		/* NOTREACHED */
	}
	bno = ffs_hashalloc(ip, cg, bpref, request, nsize, ffs_alloccg);
	if (bno > 0) {
		bp->b_blkno = fsbtodb(fs, bno);
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp, bprev, (long)osize,
			    ip->i_number, vp->v_type, NULL);
		delta = btodb(nsize - osize);
		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + delta);
		if (flags & IO_EXT)
			ip->i_flag |= IN_CHANGE;
		else
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
		allocbuf(bp, nsize);
		bp->b_flags |= B_DONE;
		vfs_bio_bzero_buf(bp, osize, nsize - osize);
		if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO)
			vfs_bio_set_valid(bp, osize, nsize - osize);
		*bpp = bp;
		return (0);
	}
#ifdef QUOTA
	UFS_UNLOCK(ump);
	/*
	 * Restore user's disk quota because allocation failed.
	 */
	(void) chkdq(ip, -btodb(nsize - osize), cred, FORCE);
	UFS_LOCK(ump);
#endif
nospace:
	/*
	 * no space available
	 */
	if (reclaimed == 0 && (flags & IO_BUFLOCKED) == 0) {
		reclaimed = 1;
		UFS_UNLOCK(ump);
		if (bp) {
			brelse(bp);
			bp = NULL;
		}
		UFS_LOCK(ump);
		softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (bp)
		brelse(bp);
	if (reclaimed > 0 && ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, ip->i_number, "filesystem full");
		uprintf("\n%s: write failed, filesystem is full\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
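
/*
 * Editor's note (illustrative, not part of the original source): a
 * worked example of the optimization switch above, assuming a
 * filesystem with fs_minfree = 8 (percent):
 *
 *	SPACE -> TIME when cs_nffree <= dsize * 8 / 200
 *			(fragments below 4%, half the reserve)
 *	TIME -> SPACE when cs_nffree >= dsize * (8 - 2) / 100
 *			(fragments within 2% of the reserve, i.e. 6%)
 *
 * The gap between the two thresholds provides hysteresis, so the
 * filesystem does not flap between the two policies.
 */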

/*
 * Reallocate a sequence of blocks into a contiguous sequence of blocks.
 *
 * The vnode and an array of buffer pointers for a range of sequential
 * logical blocks to be made contiguous is given. The allocator attempts
 * to find a range of sequential blocks starting as close as possible
 * from the end of the allocation for the logical block immediately
 * preceding the current range. If successful, the physical block numbers
 * in the buffer pointers and in the inode are changed to reflect the new
 * allocation. If unsuccessful, the allocation is left unchanged. The
 * success in doing the reallocation is returned. Note that the error
 * return is not reflected back to the user. Rather the previous block
 * allocation will be used.
 */

SYSCTL_NODE(_vfs, OID_AUTO, ffs, CTLFLAG_RW, 0, "FFS filesystem");

static int doasyncfree = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doasyncfree, CTLFLAG_RW, &doasyncfree, 0,
"do not force synchronous writes when blocks are reallocated");

static int doreallocblks = 1;
SYSCTL_INT(_vfs_ffs, OID_AUTO, doreallocblks, CTLFLAG_RW, &doreallocblks, 0,
"enable block reallocation");

static int maxclustersearch = 10;
SYSCTL_INT(_vfs_ffs, OID_AUTO, maxclustersearch, CTLFLAG_RW, &maxclustersearch,
0, "max number of cylinder groups to search for contiguous blocks");

#ifdef DEBUG
static volatile int prtrealloc = 0;
#endif

int
ffs_reallocblks(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{

	if (doreallocblks == 0)
		return (ENOSPC);
	/*
	 * We can't wait in softdep prealloc as it may fsync and recurse
	 * here. Instead we simply fail to reallocate blocks if this
	 * rare condition arises.
	 */
	if (DOINGSOFTDEP(ap->a_vp))
		if (softdep_prealloc(ap->a_vp, MNT_NOWAIT) != 0)
			return (ENOSPC);
	if (VTOI(ap->a_vp)->i_ump->um_fstype == UFS1)
		return (ffs_reallocblks_ufs1(ap));
	return (ffs_reallocblks_ufs2(ap));
}
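
/*
 * Editor's note (illustrative, not part of the original source): the
 * three knobs above are ordinary sysctls and can be inspected or tuned
 * at runtime, e.g.:
 *
 *	sysctl vfs.ffs.doreallocblks		# query
 *	sysctl vfs.ffs.doreallocblks=0		# disable block reallocation
 *	sysctl vfs.ffs.maxclustersearch=20	# search more cylinder groups
 */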

static int
ffs_reallocblks_ufs1(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs1_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs1_daddr_t soff, newblk, blkno;
	ufs2_daddr_t pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, leave space for the indirect block. Indirect blocks
	 * are initially laid out in a position after the last direct
	 * block. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din1->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs1_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs1_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs1(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", *bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din1->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din1->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %d,", blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din1->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}
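
/*
 * Editor's note (illustrative, not part of the original source): the
 * cluster search in the two functions above visits at most
 * min(maxclustersearch, fs_ncg) cylinder groups, wrapping at the end
 * of the filesystem.  A standalone sketch of the probe order, with
 * hypothetical values ncg = 6, start = 4, limit = 3:
 *
 *	int cg = 4;
 *	for (int i = 3; i > 0; i--) {	// probes cgs 4, 5, 0
 *		if (try_cluster(cg))	// hypothetical helper
 *			break;
 *		if (++cg >= 6)
 *			cg = 0;
 *	}
 *
 * On failure the cg where the search gave up is remembered in
 * i_nextclustercg, so the next attempt resumes there instead of
 * rescanning from the standard preference point.
 */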

static int
ffs_reallocblks_ufs2(ap)
	struct vop_reallocblks_args /* {
		struct vnode *a_vp;
		struct cluster_save *a_buflist;
	} */ *ap;
{
	struct fs *fs;
	struct inode *ip;
	struct vnode *vp;
	struct buf *sbp, *ebp;
	ufs2_daddr_t *bap, *sbap, *ebap;
	struct cluster_save *buflist;
	struct ufsmount *ump;
	ufs_lbn_t start_lbn, end_lbn;
	ufs2_daddr_t soff, newblk, blkno, pref;
	struct indir start_ap[NIADDR + 1], end_ap[NIADDR + 1], *idp;
	int i, cg, len, start_lvl, end_lvl, ssize;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_fs;
	ump = ip->i_ump;
	/*
	 * If we are not tracking block clusters or if we have less than 4%
	 * free blocks left, then do not attempt to cluster. Running with
	 * less than 5% free block reserve is not recommended and those that
	 * choose to do so do not expect to have good file layout.
	 */
	if (fs->fs_contigsumsize <= 0 || freespace(fs, 4) < 0)
		return (ENOSPC);
	buflist = ap->a_buflist;
	len = buflist->bs_nchildren;
	start_lbn = buflist->bs_children[0]->b_lblkno;
	end_lbn = start_lbn + len - 1;
#ifdef INVARIANTS
	for (i = 0; i < len; i++)
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 1");
	for (i = 1; i < len; i++)
		if (buflist->bs_children[i]->b_lblkno != start_lbn + i)
			panic("ffs_reallocblks: non-logical cluster");
	blkno = buflist->bs_children[0]->b_blkno;
	ssize = fsbtodb(fs, fs->fs_frag);
	for (i = 1; i < len - 1; i++)
		if (buflist->bs_children[i]->b_blkno != blkno + (i * ssize))
			panic("ffs_reallocblks: non-physical cluster %d", i);
#endif
	/*
	 * If the cluster crosses the boundary for the first indirect
	 * block, do not move anything in it. Indirect blocks are
	 * usually initially laid out in a position between the data
	 * blocks. Block reallocation would usually destroy locality by
	 * moving the indirect block out of the way to make room for
	 * data blocks if we didn't compensate here. We should also do
	 * this for other indirect block boundaries, but it is only
	 * important for the first one.
	 */
	if (start_lbn < NDADDR && end_lbn >= NDADDR)
		return (ENOSPC);
	/*
	 * If the latest allocation is in a new cylinder group, assume that
	 * the filesystem has decided to move and do not force it back to
	 * the previous cylinder group.
	 */
	if (dtog(fs, dbtofsb(fs, buflist->bs_children[0]->b_blkno)) !=
	    dtog(fs, dbtofsb(fs, buflist->bs_children[len - 1]->b_blkno)))
		return (ENOSPC);
	if (ufs_getlbns(vp, start_lbn, start_ap, &start_lvl) ||
	    ufs_getlbns(vp, end_lbn, end_ap, &end_lvl))
		return (ENOSPC);
	/*
	 * Get the starting offset and block map for the first block.
	 */
	if (start_lvl == 0) {
		sbap = &ip->i_din2->di_db[0];
		soff = start_lbn;
	} else {
		idp = &start_ap[start_lvl - 1];
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &sbp)) {
			brelse(sbp);
			return (ENOSPC);
		}
		sbap = (ufs2_daddr_t *)sbp->b_data;
		soff = idp->in_off;
	}
	/*
	 * If the block range spans two block maps, get the second map.
	 */
	ebap = NULL;
	if (end_lvl == 0 || (idp = &end_ap[end_lvl - 1])->in_off + 1 >= len) {
		ssize = len;
	} else {
#ifdef INVARIANTS
		if (start_lvl > 0 &&
		    start_ap[start_lvl - 1].in_lbn == idp->in_lbn)
			panic("ffs_reallocblk: start == end");
#endif
		ssize = len - (idp->in_off + 1);
		if (bread(vp, idp->in_lbn, (int)fs->fs_bsize, NOCRED, &ebp))
			goto fail;
		ebap = (ufs2_daddr_t *)ebp->b_data;
	}
	/*
	 * Find the preferred location for the cluster. If we have not
	 * previously failed at this endeavor, then follow our standard
	 * preference calculation. If we have failed at it, then pick up
	 * where we last ended our search.
	 */
	UFS_LOCK(ump);
	if (ip->i_nextclustercg == -1)
		pref = ffs_blkpref_ufs2(ip, start_lbn, soff, sbap);
	else
		pref = cgdata(fs, ip->i_nextclustercg);
	/*
	 * Search the block map looking for an allocation of the desired size.
	 * To avoid wasting too much time, we limit the number of cylinder
	 * groups that we will search.
	 */
	cg = dtog(fs, pref);
	for (i = min(maxclustersearch, fs->fs_ncg); i > 0; i--) {
		if ((newblk = ffs_clusteralloc(ip, cg, pref, len)) != 0)
			break;
		cg += 1;
		if (cg >= fs->fs_ncg)
			cg = 0;
	}
	/*
	 * If we have failed in our search, record where we gave up for
	 * next time. Otherwise, fall back to our usual search criterion.
	 */
	if (newblk == 0) {
		ip->i_nextclustercg = cg;
		UFS_UNLOCK(ump);
		goto fail;
	}
	ip->i_nextclustercg = -1;
	/*
	 * We have found a new contiguous block.
	 *
	 * First we have to replace the old block pointers with the new
	 * block pointers in the inode and indirect blocks associated
	 * with the file.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("realloc: ino %ju, lbns %jd-%jd\n\told:",
		    (uintmax_t)ip->i_number,
		    (intmax_t)start_lbn, (intmax_t)end_lbn);
#endif
	blkno = newblk;
	for (bap = &sbap[soff], i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (i == ssize) {
			bap = ebap;
			soff = -i;
		}
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 2");
		if (dbtofsb(fs, buflist->bs_children[i]->b_blkno) != *bap)
			panic("ffs_reallocblks: alloc mismatch");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)*bap);
#endif
		if (DOINGSOFTDEP(vp)) {
			if (sbap == &ip->i_din2->di_db[0] && i < ssize)
				softdep_setup_allocdirect(ip, start_lbn + i,
				    blkno, *bap, fs->fs_bsize, fs->fs_bsize,
				    buflist->bs_children[i]);
			else
				softdep_setup_allocindir_page(ip, start_lbn + i,
				    i < ssize ? sbp : ebp, soff + i, blkno,
				    *bap, buflist->bs_children[i]);
		}
		*bap++ = blkno;
	}
	/*
	 * Next we must write out the modified inode and indirect blocks.
	 * For strict correctness, the writes should be synchronous since
	 * the old block values may have been written to disk. In practice
	 * they are almost never written, but if we are concerned about
	 * strict correctness, the `doasyncfree' flag should be set to zero.
	 *
	 * The test on `doasyncfree' should be changed to test a flag
	 * that shows whether the associated buffers and inodes have
	 * been written. The flag should be set when the cluster is
	 * started and cleared whenever the buffer or inode is flushed.
	 * We can then check below to see if it is set, and do the
	 * synchronous write only when it has been cleared.
	 */
	if (sbap != &ip->i_din2->di_db[0]) {
		if (doasyncfree)
			bdwrite(sbp);
		else
			bwrite(sbp);
	} else {
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		if (!doasyncfree)
			ffs_update(vp, 1);
	}
	if (ssize < len) {
		if (doasyncfree)
			bdwrite(ebp);
		else
			bwrite(ebp);
	}
	/*
	 * Last, free the old blocks and assign the new blocks to the buffers.
	 */
#ifdef DEBUG
	if (prtrealloc)
		printf("\n\tnew:");
#endif
	for (blkno = newblk, i = 0; i < len; i++, blkno += fs->fs_frag) {
		if (!DOINGSOFTDEP(vp))
			ffs_blkfree(ump, fs, ip->i_devvp,
			    dbtofsb(fs, buflist->bs_children[i]->b_blkno),
			    fs->fs_bsize, ip->i_number, vp->v_type, NULL);
		buflist->bs_children[i]->b_blkno = fsbtodb(fs, blkno);
#ifdef INVARIANTS
		if (!ffs_checkblk(ip,
		   dbtofsb(fs, buflist->bs_children[i]->b_blkno), fs->fs_bsize))
			panic("ffs_reallocblks: unallocated block 3");
#endif
#ifdef DEBUG
		if (prtrealloc)
			printf(" %jd,", (intmax_t)blkno);
#endif
	}
#ifdef DEBUG
	if (prtrealloc) {
		prtrealloc--;
		printf("\n");
	}
#endif
	return (0);

fail:
	if (ssize < len)
		brelse(ebp);
	if (sbap != &ip->i_din2->di_db[0])
		brelse(sbp);
	return (ENOSPC);
}

/*
 * Allocate an inode in the filesystem.
 *
 * If allocating a directory, use ffs_dirpref to select the inode.
 * If allocating in a directory, the following hierarchy is followed:
 *   1) allocate the preferred inode.
 *   2) allocate an inode in the same cylinder group.
 *   3) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 * If no inode preference is given the following hierarchy is used
 * to allocate an inode:
 *   1) allocate an inode in cylinder group 0.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available inode is located.
 */
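/*
 * Editor's note (illustrative, not part of the original source): the
 * preferred inode ("ipref") fed into the hash allocator below is
 * simply the parent directory's inode number for regular files, or
 * the ffs_dirpref() choice for new directories, so related files tend
 * to land in the same cylinder group:
 *
 *	ipref = ((mode & IFMT) == IFDIR) ? ffs_dirpref(pip) : pip->i_number;
 */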
int
ffs_valloc(pvp, mode, cred, vpp)
	struct vnode *pvp;
	int mode;
	struct ucred *cred;
	struct vnode **vpp;
{
	struct inode *pip;
	struct fs *fs;
	struct inode *ip;
	struct timespec ts;
	struct ufsmount *ump;
	ino_t ino, ipref;
	u_int cg;
	int error, error1, reclaimed;
	static struct timeval lastfail;
	static int curfail;

	*vpp = NULL;
	pip = VTOI(pvp);
	fs = pip->i_fs;
	ump = pip->i_ump;

	UFS_LOCK(ump);
	reclaimed = 0;
retry:
	if (fs->fs_cstotal.cs_nifree == 0)
		goto noinodes;

	if ((mode & IFMT) == IFDIR)
		ipref = ffs_dirpref(pip);
	else
		ipref = pip->i_number;
	if (ipref >= fs->fs_ncg * fs->fs_ipg)
		ipref = 0;
	cg = ino_to_cg(fs, ipref);
	/*
	 * Track the number of dirs created one after another in the
	 * same cg without intervening allocation of files.
	 */
	if ((mode & IFMT) == IFDIR) {
		if (fs->fs_contigdirs[cg] < 255)
			fs->fs_contigdirs[cg]++;
	} else {
		if (fs->fs_contigdirs[cg] > 0)
			fs->fs_contigdirs[cg]--;
	}
	ino = (ino_t)ffs_hashalloc(pip, cg, ipref, mode, 0,
					(allocfcn_t *)ffs_nodealloccg);
	if (ino == 0)
		goto noinodes;
	error = ffs_vget(pvp->v_mount, ino, LK_EXCLUSIVE, vpp);
	if (error) {
		error1 = ffs_vgetf(pvp->v_mount, ino, LK_EXCLUSIVE, vpp,
		    FFSV_FORCEINSMQ);
		ffs_vfree(pvp, ino, mode);
		if (error1 == 0) {
			ip = VTOI(*vpp);
			if (ip->i_mode)
				goto dup_alloc;
			ip->i_flag |= IN_MODIFIED;
			vput(*vpp);
		}
		return (error);
	}
	ip = VTOI(*vpp);
	if (ip->i_mode) {
dup_alloc:
		printf("mode = 0%o, inum = %lu, fs = %s\n",
		    ip->i_mode, (u_long)ip->i_number, fs->fs_fsmnt);
		panic("ffs_valloc: dup alloc");
	}
	if (DIP(ip, i_blocks) && (fs->fs_flags & FS_UNCLEAN) == 0) {  /* XXX */
		printf("free inode %s/%lu had %ld blocks\n",
		    fs->fs_fsmnt, (u_long)ino, (long)DIP(ip, i_blocks));
		DIP_SET(ip, i_blocks, 0);
	}
	ip->i_flags = 0;
	DIP_SET(ip, i_flags, 0);
	/*
	 * Set up a new generation number for this inode.
	 */
	if (ip->i_gen == 0 || ++ip->i_gen == 0)
		ip->i_gen = arc4random() / 2 + 1;
	DIP_SET(ip, i_gen, ip->i_gen);
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		vfs_timestamp(&ts);
		ip->i_din2->di_birthtime = ts.tv_sec;
		ip->i_din2->di_birthnsec = ts.tv_nsec;
	}
	ufs_prepare_reclaim(*vpp);
	ip->i_flag = 0;
	(*vpp)->v_vflag = 0;
	(*vpp)->v_type = VNON;
	if (fs->fs_magic == FS_UFS2_MAGIC)
		(*vpp)->v_op = &ffs_vnodeops2;
	else
		(*vpp)->v_op = &ffs_vnodeops1;
	return (0);
noinodes:
	if (reclaimed == 0) {
		reclaimed = 1;
		softdep_request_cleanup(fs, pvp, cred, FLUSH_INODES_WAIT);
		goto retry;
	}
	UFS_UNLOCK(ump);
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		ffs_fserr(fs, pip->i_number, "out of inodes");
		uprintf("\n%s: create/symlink failed, no inodes free\n",
		    fs->fs_fsmnt);
	}
	return (ENOSPC);
}
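
/*
 * Editor's note (illustrative, not part of the original source): the
 * generation number logic in ffs_valloc() above guarantees a nonzero
 * value, which NFS clients rely on to detect stale file handles:
 *
 *	if (ip->i_gen == 0 || ++ip->i_gen == 0)
 *		ip->i_gen = arc4random() / 2 + 1;
 *
 * Halving the 32-bit random value and adding one yields a value in
 * the range [1, 2^31], so zero is never assigned.
 */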

/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files' inodes
 * and data. Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
static ino_t
ffs_dirpref(pip)
	struct inode *pip;
{
	struct fs *fs;
	int cg, prefcg, dirsize, cgsize;
	u_int avgifree, avgbfree, avgndir, curdirsize;
	u_int minifree, minbfree, maxndir;
	u_int mincg, minndir;
	u_int maxcontigdirs;

	mtx_assert(UFS_MTX(pip->i_ump), MA_OWNED);
	fs = pip->i_fs;

	avgifree = fs->fs_cstotal.cs_nifree / fs->fs_ncg;
	avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
	avgndir = fs->fs_cstotal.cs_ndir / fs->fs_ncg;

	/*
	 * Force allocation in another cg if creating a first level dir.
	 */
	ASSERT_VOP_LOCKED(ITOV(pip), "ffs_dirpref");
	if (ITOV(pip)->v_vflag & VV_ROOT) {
		prefcg = arc4random() % fs->fs_ncg;
		mincg = prefcg;
		minndir = fs->fs_ipg;
		for (cg = prefcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		for (cg = 0; cg < prefcg; cg++)
			if (fs->fs_cs(fs, cg).cs_ndir < minndir &&
			    fs->fs_cs(fs, cg).cs_nifree >= avgifree &&
			    fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				mincg = cg;
				minndir = fs->fs_cs(fs, cg).cs_ndir;
			}
		return ((ino_t)(fs->fs_ipg * mincg));
	}

	/*
	 * Compute various limits used for the optimal allocation of
	 * a directory inode.
	 */
	maxndir = min(avgndir + fs->fs_ipg / 16, fs->fs_ipg);
	minifree = avgifree - avgifree / 4;
	if (minifree < 1)
		minifree = 1;
	minbfree = avgbfree - avgbfree / 4;
	if (minbfree < 1)
		minbfree = 1;
	cgsize = fs->fs_fsize * fs->fs_fpg;
	dirsize = fs->fs_avgfilesize * fs->fs_avgfpdir;
	curdirsize = avgndir ? (cgsize - avgbfree * fs->fs_bsize) / avgndir : 0;
	if (dirsize < curdirsize)
		dirsize = curdirsize;
	if (dirsize <= 0)
		maxcontigdirs = 0;		/* dirsize overflowed */
	else
		maxcontigdirs = min((avgbfree * fs->fs_bsize) / dirsize, 255);
	if (fs->fs_avgfpdir > 0)
		maxcontigdirs = min(maxcontigdirs,
		    fs->fs_ipg / fs->fs_avgfpdir);
	if (maxcontigdirs == 0)
		maxcontigdirs = 1;

	/*
	 * Limit number of dirs in one cg and reserve space for
	 * regular files, but only if we have no deficit in
	 * inodes or space.
	 *
	 * We are trying to find a suitable cylinder group nearby
	 * our preferred cylinder group to place a new directory.
	 * We scan from our preferred cylinder group forward looking
	 * for a cylinder group that meets our criterion. If we get
	 * to the final cylinder group and do not find anything,
	 * we start scanning backwards from our preferred cylinder
	 * group. The ideal would be to alternate looking forward
	 * and backward, but that is just too complex to code for
	 * the gain it would get. The most likely place where the
	 * backward scan would take effect is when we start near
	 * the end of the filesystem and do not find anything from
	 * where we are to the end. In that case, scanning backward
	 * will likely find us a suitable cylinder group much closer
	 * to our desired location than if we were to start scanning
	 * forward from the beginning of the filesystem.
	 */
	prefcg = ino_to_cg(fs, pip->i_number);
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_ndir < maxndir &&
		    fs->fs_cs(fs, cg).cs_nifree >= minifree &&
		    fs->fs_cs(fs, cg).cs_nbfree >= minbfree) {
			if (fs->fs_contigdirs[cg] < maxcontigdirs)
				return ((ino_t)(fs->fs_ipg * cg));
		}
	/*
	 * This is a backstop when we have deficit in space.
	 */
	for (cg = prefcg; cg < fs->fs_ncg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			return ((ino_t)(fs->fs_ipg * cg));
	for (cg = 0; cg < prefcg; cg++)
		if (fs->fs_cs(fs, cg).cs_nifree >= avgifree)
			break;
	return ((ino_t)(fs->fs_ipg * cg));
}
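
/*
 * Editor's note (illustrative, not part of the original source): a
 * worked example of the maxcontigdirs computation in ffs_dirpref(),
 * using hypothetical tunables fs_avgfilesize = 16384 and
 * fs_avgfpdir = 64, with fs_bsize = 32768 and avgbfree = 1000:
 *
 *	dirsize = 16384 * 64 = 1048576 bytes expected per directory
 *	maxcontigdirs = min((1000 * 32768) / 1048576, 255) = 31
 *
 * i.e. roughly as many directories are placed consecutively in one
 * cylinder group as its free space is expected to accommodate.
 */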

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks and the next fs_maxbpg blocks. Each additional section
 * contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. The first indirect is allocated immediately following the last
 * direct block and the data blocks for the first indirect immediately
 * follow it.
 *
 * If no blocks have been allocated in any other section, the indirect
 * block(s) are allocated in the same cylinder group as its inode in an
 * area reserved immediately following the inode blocks. The policy for
 * the data blocks is to place them in a cylinder group with a greater than
 * average number of free blocks. An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block or the previous block is a hole, then the information on
 * the previous allocation is unavailable; here a best guess is made based
 * on the logical block number being allocated.
 *
 * If a section is already partially allocated, the policy is to
 * allocate blocks contiguously within the section if possible.
 */
ufs2_daddr_t
ffs_blkpref_ufs1(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs1_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din1->di_db[NDADDR - 1] != 0)
			pref = ip->i_din1->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din1->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}
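
/*
 * Editor's note (illustrative, not part of the original source): in
 * the common case the preference above degenerates to simple
 * contiguity.  With a hypothetical fs_frag = 8 and the previous block
 * at fragment address 5000, the next block is requested at:
 *
 *	pref = bap[indx - 1] + fs->fs_frag;	// 5000 + 8 = 5008
 *
 * A new section (indx % fs_maxbpg == 0) or a hole (bap[indx - 1] == 0)
 * instead triggers the rotor sweep for a cylinder group with at least
 * the filesystem-wide average number of free blocks.
 */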

/*
 * Same as above, but for UFS2
 */
ufs2_daddr_t
ffs_blkpref_ufs2(ip, lbn, indx, bap)
	struct inode *ip;
	ufs_lbn_t lbn;
	int indx;
	ufs2_daddr_t *bap;
{
	struct fs *fs;
	u_int cg, inocg;
	u_int avgbfree, startcg;
	ufs2_daddr_t pref;

	KASSERT(indx <= 0 || bap != NULL, ("need non-NULL bap"));
	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
	fs = ip->i_fs;
	/*
	 * Allocation of indirect blocks is indicated by passing negative
	 * values in indx: -1 for single indirect, -2 for double indirect,
	 * -3 for triple indirect. As noted below, we attempt to allocate
	 * the first indirect inline with the file data. For all later
	 * indirect blocks, the data is often allocated in other cylinder
	 * groups. However to speed random file access and to speed up
	 * fsck, the filesystem reserves the first fs_metaspace blocks
	 * (typically half of fs_minfree) of the data area of each cylinder
	 * group to hold these later indirect blocks.
	 */
	inocg = ino_to_cg(fs, ip->i_number);
	if (indx < 0) {
		/*
		 * Our preference for indirect blocks is the zone at the
		 * beginning of the inode's cylinder group data area that
		 * we try to reserve for indirect blocks.
		 */
		pref = cgmeta(fs, inocg);
		/*
		 * If we are allocating the first indirect block, try to
		 * place it immediately following the last direct block.
		 */
		if (indx == -1 && lbn < NDADDR + NINDIR(fs) &&
		    ip->i_din2->di_db[NDADDR - 1] != 0)
			pref = ip->i_din2->di_db[NDADDR - 1] + fs->fs_frag;
		return (pref);
	}
	/*
	 * If we are allocating the first data block in the first indirect
	 * block and the indirect has been allocated in the data block area,
	 * try to place it immediately following the indirect block.
	 */
	if (lbn == NDADDR) {
		pref = ip->i_din2->di_ib[0];
		if (pref != 0 && pref >= cgdata(fs, inocg) &&
		    pref < cgbase(fs, inocg + 1))
			return (pref + fs->fs_frag);
	}
	/*
	 * If we are at the beginning of a file, or we have already allocated
	 * the maximum number of blocks per cylinder group, or we do not
	 * have a block allocated immediately preceding us, then we need
	 * to decide where to start allocating new blocks.
	 */
	if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
		/*
		 * If we are allocating a directory data block, we want
		 * to place it in the metadata area.
		 */
		if ((ip->i_mode & IFMT) == IFDIR)
			return (cgmeta(fs, inocg));
		/*
		 * Until we fill all the direct and all the first indirect's
		 * blocks, we try to allocate in the data area of the inode's
		 * cylinder group.
		 */
		if (lbn < NDADDR + NINDIR(fs))
			return (cgdata(fs, inocg));
		/*
		 * Find a cylinder with greater than average number of
		 * unused data blocks.
		 */
		if (indx == 0 || bap[indx - 1] == 0)
			startcg = inocg + lbn / fs->fs_maxbpg;
		else
			startcg = dtog(fs, bap[indx - 1]) + 1;
		startcg %= fs->fs_ncg;
		avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
		for (cg = startcg; cg < fs->fs_ncg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		for (cg = 0; cg <= startcg; cg++)
			if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
				fs->fs_cgrotor = cg;
				return (cgdata(fs, cg));
			}
		return (0);
	}
	/*
	 * Otherwise, we just always try to lay things out contiguously.
	 */
	return (bap[indx - 1] + fs->fs_frag);
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * Must be called with the UFS lock held. Will release the lock on success
 * and return with it held on failure.
 */
/*VARARGS5*/
static ufs2_daddr_t
ffs_hashalloc(ip, cg, pref, size, rsize, allocator)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t pref;
	int size;	/* Search size for data blocks, mode for inodes */
	int rsize;	/* Real allocated size. */
	allocfcn_t *allocator;
{
	struct fs *fs;
	ufs2_daddr_t result;
	u_int i, icg = cg;

	mtx_assert(UFS_MTX(ip->i_ump), MA_OWNED);
#ifdef INVARIANTS
	if (ITOV(ip)->v_mount->mnt_kern_flag & MNTK_SUSPENDED)
		panic("ffs_hashalloc: allocation on suspended filesystem");
#endif
	fs = ip->i_fs;
	/*
	 * 1: preferred cylinder group
	 */
	result = (*allocator)(ip, cg, pref, size, rsize);
	if (result)
		return (result);
	/*
	 * 2: quadratic rehash
	 */
	for (i = 1; i < fs->fs_ncg; i *= 2) {
		cg += i;
		if (cg >= fs->fs_ncg)
			cg -= fs->fs_ncg;
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
	}
	/*
	 * 3: brute force search
	 * Note that we start at i == 2, since 0 was checked initially,
	 * and 1 is always checked in the quadratic rehash.
	 */
	cg = (icg + 2) % fs->fs_ncg;
	for (i = 2; i < fs->fs_ncg; i++) {
		result = (*allocator)(ip, cg, 0, size, rsize);
		if (result)
			return (result);
		cg++;
		if (cg == fs->fs_ncg)
			cg = 0;
	}
	return (0);
}
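
/*
 * Editor's note (illustrative, not part of the original source): a
 * worked example of the probe order in ffs_hashalloc() above for a
 * hypothetical filesystem with fs_ncg = 8, starting at cg 3:
 *
 *	pass 1:  cg 3			(preferred group)
 *	pass 2:  cg 4, 6, 2		(3+1, 3+1+2, 3+1+2+4, mod 8)
 *	pass 3:  cg 5, 6, 7, 0, 1, 2	(linear sweep from icg + 2)
 *
 * The quadratic rehash spreads early retries across distant groups
 * cheaply; the linear sweep then guarantees every group is examined
 * before the allocation is declared a failure.
 */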

/*
 * Determine whether a fragment can be extended.
 *
 * Check to see if the necessary fragments are available, and
 * if they are, allocate them.
 */
static ufs2_daddr_t
ffs_fragextend(ip, cg, bprev, osize, nsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bprev;
	int osize, nsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	int nffree;
	long bno;
	int frags, bbase;
	int i, error;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nffree < numfrags(fs, nsize - osize))
		return (0);
	frags = numfrags(fs, nsize);
	bbase = fragnum(fs, bprev);
	if (bbase > fragnum(fs, (bprev + frags - 1))) {
		/* cannot extend across a block boundary */
		return (0);
	}
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	bno = dtogd(fs, bprev);
	blksfree = cg_blksfree(cgp);
	for (i = numfrags(fs, osize); i < frags; i++)
		if (isclr(blksfree, bno + i))
			goto fail;
	/*
	 * the current fragment can be extended
	 * deduct the count on fragment being extended into
	 * increase the count on the remaining fragment (if any)
	 * allocate the extended piece
	 */
	for (i = frags; i < fs->fs_frag - bbase; i++)
		if (isclr(blksfree, bno + i))
			break;
	cgp->cg_frsum[i - numfrags(fs, osize)]--;
	if (i != frags)
		cgp->cg_frsum[i - frags]++;
	for (i = numfrags(fs, osize), nffree = 0; i < frags; i++) {
		clrbit(blksfree, bno + i);
		cgp->cg_cs.cs_nffree--;
		nffree++;
	}
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= nffree;
	fs->fs_cs(fs, cg).cs_nffree -= nffree;
	fs->fs_fmod = 1;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), bprev,
		    frags, numfrags(fs, osize));
	bdwrite(bp);
	return (bprev);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);

}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static ufs2_daddr_t
ffs_alloccg(ip, cg, bpref, size, rsize)
	struct inode *ip;
	u_int cg;
	ufs2_daddr_t bpref;
	int size;
	int rsize;
{
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	struct ufsmount *ump;
	ufs1_daddr_t bno;
	ufs2_daddr_t blkno;
	int i, allocsiz, error, frags;
	u_int8_t *blksfree;

	ump = ip->i_ump;
	fs = ip->i_fs;
	if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
		return (0);
	UFS_UNLOCK(ump);
	error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)),
	    (int)fs->fs_cgsize, NOCRED, &bp);
	if (error)
		goto fail;
	cgp = (struct cg *)bp->b_data;
	if (!cg_chkmagic(cgp) ||
	    (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize))
		goto fail;
	bp->b_xflags |= BX_BKGRDWRITE;
	cgp->cg_old_time = cgp->cg_time = time_second;
	if (size == fs->fs_bsize) {
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	/*
	 * check to see if any fragments are already available
	 * allocsiz is the size which will be allocated, hacking
	 * it down to a smaller size if necessary
	 */
	blksfree = cg_blksfree(cgp);
	frags = numfrags(fs, size);
	for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
		if (cgp->cg_frsum[allocsiz] != 0)
			break;
	if (allocsiz == fs->fs_frag) {
		/*
		 * no fragments were available, so a block will be
		 * allocated, and hacked up
		 */
		if (cgp->cg_cs.cs_nbfree == 0)
			goto fail;
		UFS_LOCK(ump);
		blkno = ffs_alloccgblk(ip, bp, bpref, rsize);
		ACTIVECLEAR(fs, cg);
		UFS_UNLOCK(ump);
		bdwrite(bp);
		return (blkno);
	}
	KASSERT(size == rsize,
	    ("ffs_alloccg: size(%d) != rsize(%d)", size, rsize));
	bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
	if (bno < 0)
		goto fail;
	for (i = 0; i < frags; i++)
		clrbit(blksfree, bno + i);
	cgp->cg_cs.cs_nffree -= frags;
	cgp->cg_frsum[allocsiz]--;
	if (frags != allocsiz)
		cgp->cg_frsum[allocsiz - frags]++;
	UFS_LOCK(ump);
	fs->fs_cstotal.cs_nffree -= frags;
	fs->fs_cs(fs, cg).cs_nffree -= frags;
	fs->fs_fmod = 1;
	blkno = cgbase(fs, cg) + bno;
	ACTIVECLEAR(fs, cg);
	UFS_UNLOCK(ump);
	if (DOINGSOFTDEP(ITOV(ip)))
		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, frags, 0);
	bdwrite(bp);
	return (blkno);

fail:
	brelse(bp);
	UFS_LOCK(ump);
	return (0);
}
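
/*
 * Editor's note (illustrative, not part of the original source): a
 * worked example of the cg_frsum bookkeeping in ffs_alloccg() above.
 * Assume fs_frag = 8 and a request for 3 fragments that is satisfied
 * from an existing free run of 5 fragments (allocsiz = 5):
 *
 *	cgp->cg_frsum[5]--;	// one run of 5 free frags consumed
 *	cgp->cg_frsum[2]++;	// a run of 5 - 3 = 2 free frags remains
 *	cs_nffree -= 3;		// three fragments left the free pool
 *
 * cg_frsum[] therefore always counts free runs by length, which is
 * exactly what the initial allocsiz search scans.
 */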
1751 */ 1752static ufs2_daddr_t 1753ffs_alloccgblk(ip, bp, bpref, size) 1754	struct inode *ip; 1755	struct buf *bp; 1756	ufs2_daddr_t bpref; 1757	int size; 1758{ 1759	struct fs *fs; 1760	struct cg *cgp; 1761	struct ufsmount *ump; 1762	ufs1_daddr_t bno; 1763	ufs2_daddr_t blkno; 1764	u_int8_t *blksfree; 1765	int i, cgbpref; 1766 1767	fs = ip->i_fs; 1768	ump = ip->i_ump; 1769	mtx_assert(UFS_MTX(ump), MA_OWNED); 1770	cgp = (struct cg *)bp->b_data; 1771	blksfree = cg_blksfree(cgp); 1772	if (bpref == 0) { 1773		bpref = cgbase(fs, cgp->cg_cgx) + cgp->cg_rotor + fs->fs_frag; 1774	} else if ((cgbpref = dtog(fs, bpref)) != cgp->cg_cgx) { 1775		/* map bpref to correct zone in this cg */ 1776		if (bpref < cgdata(fs, cgbpref)) 1777			bpref = cgmeta(fs, cgp->cg_cgx); 1778		else 1779			bpref = cgdata(fs, cgp->cg_cgx); 1780	} 1781	/* 1782	 * if the requested block is available, use it 1783	 */ 1784	bno = dtogd(fs, blknum(fs, bpref)); 1785	if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno))) 1786		goto gotit; 1787	/* 1788	 * Take the next available block in this cylinder group. 1789	 */ 1790	bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag); 1791	if (bno < 0) 1792		return (0); 1793	/* Update cg_rotor only if allocated from the data zone */ 1794	if (bno >= dtogd(fs, cgdata(fs, cgp->cg_cgx))) 1795		cgp->cg_rotor = bno; 1796gotit: 1797	blkno = fragstoblks(fs, bno); 1798	ffs_clrblock(fs, blksfree, (long)blkno); 1799	ffs_clusteracct(fs, cgp, blkno, -1); 1800	cgp->cg_cs.cs_nbfree--; 1801	fs->fs_cstotal.cs_nbfree--; 1802	fs->fs_cs(fs, cgp->cg_cgx).cs_nbfree--; 1803	fs->fs_fmod = 1; 1804	blkno = cgbase(fs, cgp->cg_cgx) + bno; 1805	/* 1806	 * If the caller didn't want the whole block, free the frags here. 1807	 */ 1808	size = numfrags(fs, size); 1809	if (size != fs->fs_frag) { 1810		bno = dtogd(fs, blkno); 1811		for (i = size; i < fs->fs_frag; i++) 1812			setbit(blksfree, bno + i); 1813		i = fs->fs_frag - size; 1814		cgp->cg_cs.cs_nffree += i; 1815		fs->fs_cstotal.cs_nffree += i; 1816		fs->fs_cs(fs, cgp->cg_cgx).cs_nffree += i; 1817		fs->fs_fmod = 1; 1818		cgp->cg_frsum[i]++; 1819	} 1820	/* XXX Fixme. */ 1821	UFS_UNLOCK(ump); 1822	if (DOINGSOFTDEP(ITOV(ip))) 1823		softdep_setup_blkmapdep(bp, UFSTOVFS(ump), blkno, 1824		    size, 0); 1825	UFS_LOCK(ump); 1826	return (blkno); 1827} 1828 1829/* 1830 * Determine whether a cluster can be allocated. 1831 * 1832 * We do not currently check for optimal rotational layout if there 1833 * are multiple choices in the same cylinder group. Instead we just 1834 * take the first one that we find following bpref. 1835 */ 1836static ufs2_daddr_t 1837ffs_clusteralloc(ip, cg, bpref, len) 1838	struct inode *ip; 1839	u_int cg; 1840	ufs2_daddr_t bpref; 1841	int len; 1842{ 1843	struct fs *fs; 1844	struct cg *cgp; 1845	struct buf *bp; 1846	struct ufsmount *ump; 1847	int i, run, bit, map, got; 1848	ufs2_daddr_t bno; 1849	u_char *mapp; 1850	int32_t *lp; 1851	u_int8_t *blksfree; 1852 1853	fs = ip->i_fs; 1854	ump = ip->i_ump; 1855	if (fs->fs_maxcluster[cg] < len) 1856		return (0); 1857	UFS_UNLOCK(ump); 1858	if (bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), (int)fs->fs_cgsize, 1859	    NOCRED, &bp)) 1860		goto fail_lock; 1861	cgp = (struct cg *)bp->b_data; 1862	if (!cg_chkmagic(cgp)) 1863		goto fail_lock; 1864	bp->b_xflags |= BX_BKGRDWRITE; 1865	/* 1866	 * Check to see if a cluster of the needed size (or bigger) is 1867	 * available in this cylinder group.
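	 *
	 * cg_clustersum[i] counts the free runs of exactly i contiguous
	 * blocks in this group (runs longer than fs_contigsumsize are
	 * credited to the last entry), so a usable cluster exists iff
	 * some entry at index len or above is nonzero.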
1868 */ 1869 lp = &cg_clustersum(cgp)[len]; 1870 for (i = len; i <= fs->fs_contigsumsize; i++) 1871 if (*lp++ > 0) 1872 break; 1873 if (i > fs->fs_contigsumsize) { 1874 /* 1875 * This is the first time looking for a cluster in this 1876 * cylinder group. Update the cluster summary information 1877 * to reflect the true maximum sized cluster so that 1878 * future cluster allocation requests can avoid reading 1879 * the cylinder group map only to find no clusters. 1880 */ 1881 lp = &cg_clustersum(cgp)[len - 1]; 1882 for (i = len - 1; i > 0; i--) 1883 if (*lp-- > 0) 1884 break; 1885 UFS_LOCK(ump); 1886 fs->fs_maxcluster[cg] = i; 1887 goto fail; 1888 } 1889 /* 1890 * Search the cluster map to find a big enough cluster. 1891 * We take the first one that we find, even if it is larger 1892 * than we need as we prefer to get one close to the previous 1893 * block allocation. We do not search before the current 1894 * preference point as we do not want to allocate a block 1895 * that is allocated before the previous one (as we will 1896 * then have to wait for another pass of the elevator 1897 * algorithm before it will be read). We prefer to fail and 1898 * be recalled to try an allocation in the next cylinder group. 1899 */ 1900 if (dtog(fs, bpref) != cg) 1901 bpref = cgdata(fs, cg); 1902 else 1903 bpref = blknum(fs, bpref); 1904 bpref = fragstoblks(fs, dtogd(fs, bpref)); 1905 mapp = &cg_clustersfree(cgp)[bpref / NBBY]; 1906 map = *mapp++; 1907 bit = 1 << (bpref % NBBY); 1908 for (run = 0, got = bpref; got < cgp->cg_nclusterblks; got++) { 1909 if ((map & bit) == 0) { 1910 run = 0; 1911 } else { 1912 run++; 1913 if (run == len) 1914 break; 1915 } 1916 if ((got & (NBBY - 1)) != (NBBY - 1)) { 1917 bit <<= 1; 1918 } else { 1919 map = *mapp++; 1920 bit = 1; 1921 } 1922 } 1923 if (got >= cgp->cg_nclusterblks) 1924 goto fail_lock; 1925 /* 1926 * Allocate the cluster that we have found. 1927 */ 1928 blksfree = cg_blksfree(cgp); 1929 for (i = 1; i <= len; i++) 1930 if (!ffs_isblock(fs, blksfree, got - run + i)) 1931 panic("ffs_clusteralloc: map mismatch"); 1932 bno = cgbase(fs, cg) + blkstofrags(fs, got - run + 1); 1933 if (dtog(fs, bno) != cg) 1934 panic("ffs_clusteralloc: allocated out of group"); 1935 len = blkstofrags(fs, len); 1936 UFS_LOCK(ump); 1937 for (i = 0; i < len; i += fs->fs_frag) 1938 if (ffs_alloccgblk(ip, bp, bno + i, fs->fs_bsize) != bno + i) 1939 panic("ffs_clusteralloc: lost block"); 1940 ACTIVECLEAR(fs, cg); 1941 UFS_UNLOCK(ump); 1942 bdwrite(bp); 1943 return (bno); 1944 1945fail_lock: 1946 UFS_LOCK(ump); 1947fail: 1948 brelse(bp); 1949 return (0); 1950} 1951 1952static inline struct buf * 1953getinobuf(struct inode *ip, u_int cg, u_int32_t cginoblk, int gbflags) 1954{ 1955 struct fs *fs; 1956 1957 fs = ip->i_fs; 1958 return (getblk(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, 1959 cg * fs->fs_ipg + cginoblk)), (int)fs->fs_bsize, 0, 0, 1960 gbflags)); 1961} 1962 1963/* 1964 * Determine whether an inode can be allocated. 1965 * 1966 * Check to see if an inode is available, and if it is, 1967 * allocate it using the following policy: 1968 * 1) allocate the requested inode. 1969 * 2) allocate the next available inode after the requested 1970 * inode in the specified cylinder group. 
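 *
 * The scan below exploits the packed inosused bitmap: a byte equal
 * to 0xff covers eight in-use inodes, so memcchr() can skip fully
 * used bytes, and ffs(~byte) then selects the first clear bit of the
 * first interesting byte.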
1971 */ 1972static ufs2_daddr_t 1973ffs_nodealloccg(ip, cg, ipref, mode, unused) 1974 struct inode *ip; 1975 u_int cg; 1976 ufs2_daddr_t ipref; 1977 int mode; 1978 int unused; 1979{ 1980 struct fs *fs; 1981 struct cg *cgp; 1982 struct buf *bp, *ibp; 1983 struct ufsmount *ump; 1984 u_int8_t *inosused, *loc; 1985 struct ufs2_dinode *dp2; 1986 int error, start, len, i; 1987 u_int32_t old_initediblk; 1988 1989 fs = ip->i_fs; 1990 ump = ip->i_ump; 1991check_nifree: 1992 if (fs->fs_cs(fs, cg).cs_nifree == 0) 1993 return (0); 1994 UFS_UNLOCK(ump); 1995 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 1996 (int)fs->fs_cgsize, NOCRED, &bp); 1997 if (error) { 1998 brelse(bp); 1999 UFS_LOCK(ump); 2000 return (0); 2001 } 2002 cgp = (struct cg *)bp->b_data; 2003restart: 2004 if (!cg_chkmagic(cgp) || cgp->cg_cs.cs_nifree == 0) { 2005 brelse(bp); 2006 UFS_LOCK(ump); 2007 return (0); 2008 } 2009 bp->b_xflags |= BX_BKGRDWRITE; 2010 inosused = cg_inosused(cgp); 2011 if (ipref) { 2012 ipref %= fs->fs_ipg; 2013 if (isclr(inosused, ipref)) 2014 goto gotit; 2015 } 2016 start = cgp->cg_irotor / NBBY; 2017 len = howmany(fs->fs_ipg - cgp->cg_irotor, NBBY); 2018 loc = memcchr(&inosused[start], 0xff, len); 2019 if (loc == NULL) { 2020 len = start + 1; 2021 start = 0; 2022 loc = memcchr(&inosused[start], 0xff, len); 2023 if (loc == NULL) { 2024 printf("cg = %d, irotor = %ld, fs = %s\n", 2025 cg, (long)cgp->cg_irotor, fs->fs_fsmnt); 2026 panic("ffs_nodealloccg: map corrupted"); 2027 /* NOTREACHED */ 2028 } 2029 } 2030 ipref = (loc - inosused) * NBBY + ffs(~*loc) - 1; 2031gotit: 2032 /* 2033 * Check to see if we need to initialize more inodes. 2034 */ 2035 if (fs->fs_magic == FS_UFS2_MAGIC && 2036 ipref + INOPB(fs) > cgp->cg_initediblk && 2037 cgp->cg_initediblk < cgp->cg_niblk) { 2038 old_initediblk = cgp->cg_initediblk; 2039 2040 /* 2041 * Free the cylinder group lock before writing the 2042 * initialized inode block. Entering the 2043 * babarrierwrite() with the cylinder group lock 2044 * causes lock order violation between the lock and 2045 * snaplk. 2046 * 2047 * Another thread can decide to initialize the same 2048 * inode block, but whichever thread first gets the 2049 * cylinder group lock after writing the newly 2050 * allocated inode block will update it and the other 2051 * will realize that it has lost and leave the 2052 * cylinder group unchanged. 2053 */ 2054 ibp = getinobuf(ip, cg, old_initediblk, GB_LOCK_NOWAIT); 2055 brelse(bp); 2056 if (ibp == NULL) { 2057 /* 2058 * The inode block buffer is already owned by 2059 * another thread, which must initialize it. 2060 * Wait on the buffer to allow another thread 2061 * to finish the updates, with dropped cg 2062 * buffer lock, then retry. 2063 */ 2064 ibp = getinobuf(ip, cg, old_initediblk, 0); 2065 brelse(ibp); 2066 UFS_LOCK(ump); 2067 goto check_nifree; 2068 } 2069 bzero(ibp->b_data, (int)fs->fs_bsize); 2070 dp2 = (struct ufs2_dinode *)(ibp->b_data); 2071 for (i = 0; i < INOPB(fs); i++) { 2072 dp2->di_gen = arc4random() / 2 + 1; 2073 dp2++; 2074 } 2075 /* 2076 * Rather than adding a soft updates dependency to ensure 2077 * that the new inode block is written before it is claimed 2078 * by the cylinder group map, we just do a barrier write 2079 * here. The barrier write will ensure that the inode block 2080 * gets written before the updated cylinder group map can be 2081 * written. The barrier write should only slow down bulk 2082 * loading of newly created filesystems. 
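		 *
		 * Without that ordering, a crash could leave
		 * cg_initediblk on disk pointing past inode blocks that
		 * were never written, and stale sectors would later be
		 * read back as if they held initialized dinodes.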
2083		 */ 2084		babarrierwrite(ibp); 2085 2086		/* 2087		 * After the inode block is written, try to update the 2088		 * cg initediblk pointer.  If another thread beat us 2089		 * to it, then leave it unchanged as the other thread 2090		 * has already set it correctly. 2091		 */ 2092		error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, cg)), 2093		    (int)fs->fs_cgsize, NOCRED, &bp); 2094		UFS_LOCK(ump); 2095		ACTIVECLEAR(fs, cg); 2096		UFS_UNLOCK(ump); 2097		if (error != 0) { 2098			brelse(bp); 2099			return (0); 2100		} 2101		cgp = (struct cg *)bp->b_data; 2102		if (cgp->cg_initediblk == old_initediblk) 2103			cgp->cg_initediblk += INOPB(fs); 2104		goto restart; 2105	} 2106	cgp->cg_old_time = cgp->cg_time = time_second; 2107	cgp->cg_irotor = ipref; 2108	UFS_LOCK(ump); 2109	ACTIVECLEAR(fs, cg); 2110	setbit(inosused, ipref); 2111	cgp->cg_cs.cs_nifree--; 2112	fs->fs_cstotal.cs_nifree--; 2113	fs->fs_cs(fs, cg).cs_nifree--; 2114	fs->fs_fmod = 1; 2115	if ((mode & IFMT) == IFDIR) { 2116		cgp->cg_cs.cs_ndir++; 2117		fs->fs_cstotal.cs_ndir++; 2118		fs->fs_cs(fs, cg).cs_ndir++; 2119	} 2120	UFS_UNLOCK(ump); 2121	if (DOINGSOFTDEP(ITOV(ip))) 2122		softdep_setup_inomapdep(bp, ip, cg * fs->fs_ipg + ipref, mode); 2123	bdwrite(bp); 2124	return ((ino_t)(cg * fs->fs_ipg + ipref)); 2125} 2126 2127/* 2128 * Free a block or fragment. 2129 * 2130 * The specified block or fragment is placed back in the 2131 * free map. If a fragment is deallocated, a possible 2132 * block reassembly is checked. 2133 */ 2134static void 2135ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd) 2136	struct ufsmount *ump; 2137	struct fs *fs; 2138	struct vnode *devvp; 2139	ufs2_daddr_t bno; 2140	long size; 2141	ino_t inum; 2142	struct workhead *dephd; 2143{ 2144	struct mount *mp; 2145	struct cg *cgp; 2146	struct buf *bp; 2147	ufs1_daddr_t fragno, cgbno; 2148	ufs2_daddr_t cgblkno; 2149	int i, blk, frags, bbase; 2150	u_int cg; 2151	u_int8_t *blksfree; 2152	struct cdev *dev; 2153 2154	cg = dtog(fs, bno); 2155	if (devvp->v_type == VREG) { 2156		/* devvp is a snapshot */ 2157		dev = VTOI(devvp)->i_devvp->v_rdev; 2158		cgblkno = fragstoblks(fs, cgtod(fs, cg)); 2159	} else { 2160		/* devvp is a normal disk device */ 2161		dev = devvp->v_rdev; 2162		cgblkno = fsbtodb(fs, cgtod(fs, cg)); 2163		ASSERT_VOP_LOCKED(devvp, "ffs_blkfree_cg"); 2164	} 2165#ifdef INVARIANTS 2166	if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 || 2167	    fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) { 2168		printf("dev=%s, bno = %jd, bsize = %ld, size = %ld, fs = %s\n", 2169		    devtoname(dev), (intmax_t)bno, (long)fs->fs_bsize, 2170		    size, fs->fs_fsmnt); 2171		panic("ffs_blkfree_cg: bad size"); 2172	} 2173#endif 2174	if ((u_int)bno >= fs->fs_size) { 2175		printf("bad block %jd, ino %lu\n", (intmax_t)bno, 2176		    (u_long)inum); 2177		ffs_fserr(fs, inum, "bad block"); 2178		return; 2179	} 2180	if (bread(devvp, cgblkno, (int)fs->fs_cgsize, NOCRED, &bp)) { 2181		brelse(bp); 2182		return; 2183	} 2184	cgp = (struct cg *)bp->b_data; 2185	if (!cg_chkmagic(cgp)) { 2186		brelse(bp); 2187		return; 2188	} 2189	bp->b_xflags |= BX_BKGRDWRITE; 2190	cgp->cg_old_time = cgp->cg_time = time_second; 2191	cgbno = dtogd(fs, bno); 2192	blksfree = cg_blksfree(cgp); 2193	UFS_LOCK(ump); 2194	if (size == fs->fs_bsize) { 2195		fragno = fragstoblks(fs, cgbno); 2196		if (!ffs_isfreeblock(fs, blksfree, fragno)) { 2197			if (devvp->v_type == VREG) { 2198				UFS_UNLOCK(ump); 2199				/* devvp is a snapshot */ 2200				brelse(bp); 2201				return; 2202			} 2203			printf("dev = %s, block = %jd, fs = %s\n", 2204			    devtoname(dev), (intmax_t)bno, fs->fs_fsmnt); 2205			
panic("ffs_blkfree_cg: freeing free block"); 2206 } 2207 ffs_setblock(fs, blksfree, fragno); 2208 ffs_clusteracct(fs, cgp, fragno, 1); 2209 cgp->cg_cs.cs_nbfree++; 2210 fs->fs_cstotal.cs_nbfree++; 2211 fs->fs_cs(fs, cg).cs_nbfree++; 2212 } else { 2213 bbase = cgbno - fragnum(fs, cgbno); 2214 /* 2215 * decrement the counts associated with the old frags 2216 */ 2217 blk = blkmap(fs, blksfree, bbase); 2218 ffs_fragacct(fs, blk, cgp->cg_frsum, -1); 2219 /* 2220 * deallocate the fragment 2221 */ 2222 frags = numfrags(fs, size); 2223 for (i = 0; i < frags; i++) { 2224 if (isset(blksfree, cgbno + i)) { 2225 printf("dev = %s, block = %jd, fs = %s\n", 2226 devtoname(dev), (intmax_t)(bno + i), 2227 fs->fs_fsmnt); 2228 panic("ffs_blkfree_cg: freeing free frag"); 2229 } 2230 setbit(blksfree, cgbno + i); 2231 } 2232 cgp->cg_cs.cs_nffree += i; 2233 fs->fs_cstotal.cs_nffree += i; 2234 fs->fs_cs(fs, cg).cs_nffree += i; 2235 /* 2236 * add back in counts associated with the new frags 2237 */ 2238 blk = blkmap(fs, blksfree, bbase); 2239 ffs_fragacct(fs, blk, cgp->cg_frsum, 1); 2240 /* 2241 * if a complete block has been reassembled, account for it 2242 */ 2243 fragno = fragstoblks(fs, bbase); 2244 if (ffs_isblock(fs, blksfree, fragno)) { 2245 cgp->cg_cs.cs_nffree -= fs->fs_frag; 2246 fs->fs_cstotal.cs_nffree -= fs->fs_frag; 2247 fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag; 2248 ffs_clusteracct(fs, cgp, fragno, 1); 2249 cgp->cg_cs.cs_nbfree++; 2250 fs->fs_cstotal.cs_nbfree++; 2251 fs->fs_cs(fs, cg).cs_nbfree++; 2252 } 2253 } 2254 fs->fs_fmod = 1; 2255 ACTIVECLEAR(fs, cg); 2256 UFS_UNLOCK(ump); 2257 mp = UFSTOVFS(ump); 2258 if (MOUNTEDSOFTDEP(mp) && devvp->v_type != VREG) 2259 softdep_setup_blkfree(UFSTOVFS(ump), bp, bno, 2260 numfrags(fs, size), dephd); 2261 bdwrite(bp); 2262} 2263 2264struct ffs_blkfree_trim_params { 2265 struct task task; 2266 struct ufsmount *ump; 2267 struct vnode *devvp; 2268 ufs2_daddr_t bno; 2269 long size; 2270 ino_t inum; 2271 struct workhead *pdephd; 2272 struct workhead dephd; 2273}; 2274 2275static void 2276ffs_blkfree_trim_task(ctx, pending) 2277 void *ctx; 2278 int pending; 2279{ 2280 struct ffs_blkfree_trim_params *tp; 2281 2282 tp = ctx; 2283 ffs_blkfree_cg(tp->ump, tp->ump->um_fs, tp->devvp, tp->bno, tp->size, 2284 tp->inum, tp->pdephd); 2285 vn_finished_secondary_write(UFSTOVFS(tp->ump)); 2286 atomic_add_int(&tp->ump->um_trim_inflight, -1); 2287 free(tp, M_TEMP); 2288} 2289 2290static void 2291ffs_blkfree_trim_completed(bip) 2292 struct bio *bip; 2293{ 2294 struct ffs_blkfree_trim_params *tp; 2295 2296 tp = bip->bio_caller2; 2297 g_destroy_bio(bip); 2298 TASK_INIT(&tp->task, 0, ffs_blkfree_trim_task, tp); 2299 taskqueue_enqueue(tp->ump->um_trim_tq, &tp->task); 2300} 2301 2302void 2303ffs_blkfree(ump, fs, devvp, bno, size, inum, vtype, dephd) 2304 struct ufsmount *ump; 2305 struct fs *fs; 2306 struct vnode *devvp; 2307 ufs2_daddr_t bno; 2308 long size; 2309 ino_t inum; 2310 enum vtype vtype; 2311 struct workhead *dephd; 2312{ 2313 struct mount *mp; 2314 struct bio *bip; 2315 struct ffs_blkfree_trim_params *tp; 2316 2317 /* 2318 * Check to see if a snapshot wants to claim the block. 2319 * Check that devvp is a normal disk device, not a snapshot, 2320 * it has a snapshot(s) associated with it, and one of the 2321 * snapshots wants to claim the block. 
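	 *
	 * If ffs_snapblkfree() returns true, the snapshot has claimed
	 * the block for itself and it must not also be returned to the
	 * free map here.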
2322 */ 2323 if (devvp->v_type != VREG && 2324 (devvp->v_vflag & VV_COPYONWRITE) && 2325 ffs_snapblkfree(fs, devvp, bno, size, inum, vtype, dephd)) { 2326 return; 2327 } 2328 /* 2329 * Nothing to delay if TRIM is disabled, or the operation is 2330 * performed on the snapshot. 2331 */ 2332 if (!ump->um_candelete || devvp->v_type == VREG) { 2333 ffs_blkfree_cg(ump, fs, devvp, bno, size, inum, dephd); 2334 return; 2335 } 2336 2337 /* 2338 * Postpone the set of the free bit in the cg bitmap until the 2339 * BIO_DELETE is completed. Otherwise, due to disk queue 2340 * reordering, TRIM might be issued after we reuse the block 2341 * and write some new data into it. 2342 */ 2343 atomic_add_int(&ump->um_trim_inflight, 1); 2344 tp = malloc(sizeof(struct ffs_blkfree_trim_params), M_TEMP, M_WAITOK); 2345 tp->ump = ump; 2346 tp->devvp = devvp; 2347 tp->bno = bno; 2348 tp->size = size; 2349 tp->inum = inum; 2350 if (dephd != NULL) { 2351 LIST_INIT(&tp->dephd); 2352 LIST_SWAP(dephd, &tp->dephd, worklist, wk_list); 2353 tp->pdephd = &tp->dephd; 2354 } else 2355 tp->pdephd = NULL; 2356 2357 bip = g_alloc_bio(); 2358 bip->bio_cmd = BIO_DELETE; 2359 bip->bio_offset = dbtob(fsbtodb(fs, bno)); 2360 bip->bio_done = ffs_blkfree_trim_completed; 2361 bip->bio_length = size; 2362 bip->bio_caller2 = tp; 2363 2364 mp = UFSTOVFS(ump); 2365 vn_start_secondary_write(NULL, &mp, 0); 2366 g_io_request(bip, (struct g_consumer *)devvp->v_bufobj.bo_private); 2367} 2368 2369#ifdef INVARIANTS 2370/* 2371 * Verify allocation of a block or fragment. Returns true if block or 2372 * fragment is allocated, false if it is free. 2373 */ 2374static int 2375ffs_checkblk(ip, bno, size) 2376 struct inode *ip; 2377 ufs2_daddr_t bno; 2378 long size; 2379{ 2380 struct fs *fs; 2381 struct cg *cgp; 2382 struct buf *bp; 2383 ufs1_daddr_t cgbno; 2384 int i, error, frags, free; 2385 u_int8_t *blksfree; 2386 2387 fs = ip->i_fs; 2388 if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) { 2389 printf("bsize = %ld, size = %ld, fs = %s\n", 2390 (long)fs->fs_bsize, size, fs->fs_fsmnt); 2391 panic("ffs_checkblk: bad size"); 2392 } 2393 if ((u_int)bno >= fs->fs_size) 2394 panic("ffs_checkblk: bad block %jd", (intmax_t)bno); 2395 error = bread(ip->i_devvp, fsbtodb(fs, cgtod(fs, dtog(fs, bno))), 2396 (int)fs->fs_cgsize, NOCRED, &bp); 2397 if (error) 2398 panic("ffs_checkblk: cg bread failed"); 2399 cgp = (struct cg *)bp->b_data; 2400 if (!cg_chkmagic(cgp)) 2401 panic("ffs_checkblk: cg magic mismatch"); 2402 bp->b_xflags |= BX_BKGRDWRITE; 2403 blksfree = cg_blksfree(cgp); 2404 cgbno = dtogd(fs, bno); 2405 if (size == fs->fs_bsize) { 2406 free = ffs_isblock(fs, blksfree, fragstoblks(fs, cgbno)); 2407 } else { 2408 frags = numfrags(fs, size); 2409 for (free = 0, i = 0; i < frags; i++) 2410 if (isset(blksfree, cgbno + i)) 2411 free++; 2412 if (free != 0 && free != frags) 2413 panic("ffs_checkblk: partially free fragment"); 2414 } 2415 brelse(bp); 2416 return (!free); 2417} 2418#endif /* INVARIANTS */ 2419 2420/* 2421 * Free an inode. 2422 */ 2423int 2424ffs_vfree(pvp, ino, mode) 2425 struct vnode *pvp; 2426 ino_t ino; 2427 int mode; 2428{ 2429 struct inode *ip; 2430 2431 if (DOINGSOFTDEP(pvp)) { 2432 softdep_freefile(pvp, ino, mode); 2433 return (0); 2434 } 2435 ip = VTOI(pvp); 2436 return (ffs_freefile(ip->i_ump, ip->i_fs, ip->i_devvp, ino, mode, 2437 NULL)); 2438} 2439 2440/* 2441 * Do the actual free operation. 2442 * The specified inode is placed back in the free map. 
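 *
 * Only cylinder group state is touched: the inosused bit is cleared,
 * cg_irotor is pulled back if the freed inode lies below it, and the
 * free inode (and, for directories, ndir) summary counts are updated.
 * The on-disk dinode itself is not cleared here.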
2443 */ 2444int 2445ffs_freefile(ump, fs, devvp, ino, mode, wkhd) 2446 struct ufsmount *ump; 2447 struct fs *fs; 2448 struct vnode *devvp; 2449 ino_t ino; 2450 int mode; 2451 struct workhead *wkhd; 2452{ 2453 struct cg *cgp; 2454 struct buf *bp; 2455 ufs2_daddr_t cgbno; 2456 int error; 2457 u_int cg; 2458 u_int8_t *inosused; 2459 struct cdev *dev; 2460 2461 cg = ino_to_cg(fs, ino); 2462 if (devvp->v_type == VREG) { 2463 /* devvp is a snapshot */ 2464 dev = VTOI(devvp)->i_devvp->v_rdev; 2465 cgbno = fragstoblks(fs, cgtod(fs, cg)); 2466 } else { 2467 /* devvp is a normal disk device */ 2468 dev = devvp->v_rdev; 2469 cgbno = fsbtodb(fs, cgtod(fs, cg)); 2470 } 2471 if (ino >= fs->fs_ipg * fs->fs_ncg) 2472 panic("ffs_freefile: range: dev = %s, ino = %ju, fs = %s", 2473 devtoname(dev), (uintmax_t)ino, fs->fs_fsmnt); 2474 if ((error = bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp))) { 2475 brelse(bp); 2476 return (error); 2477 } 2478 cgp = (struct cg *)bp->b_data; 2479 if (!cg_chkmagic(cgp)) { 2480 brelse(bp); 2481 return (0); 2482 } 2483 bp->b_xflags |= BX_BKGRDWRITE; 2484 cgp->cg_old_time = cgp->cg_time = time_second; 2485 inosused = cg_inosused(cgp); 2486 ino %= fs->fs_ipg; 2487 if (isclr(inosused, ino)) { 2488 printf("dev = %s, ino = %ju, fs = %s\n", devtoname(dev), 2489 (uintmax_t)(ino + cg * fs->fs_ipg), fs->fs_fsmnt); 2490 if (fs->fs_ronly == 0) 2491 panic("ffs_freefile: freeing free inode"); 2492 } 2493 clrbit(inosused, ino); 2494 if (ino < cgp->cg_irotor) 2495 cgp->cg_irotor = ino; 2496 cgp->cg_cs.cs_nifree++; 2497 UFS_LOCK(ump); 2498 fs->fs_cstotal.cs_nifree++; 2499 fs->fs_cs(fs, cg).cs_nifree++; 2500 if ((mode & IFMT) == IFDIR) { 2501 cgp->cg_cs.cs_ndir--; 2502 fs->fs_cstotal.cs_ndir--; 2503 fs->fs_cs(fs, cg).cs_ndir--; 2504 } 2505 fs->fs_fmod = 1; 2506 ACTIVECLEAR(fs, cg); 2507 UFS_UNLOCK(ump); 2508 if (MOUNTEDSOFTDEP(UFSTOVFS(ump)) && devvp->v_type != VREG) 2509 softdep_setup_inofree(UFSTOVFS(ump), bp, 2510 ino + cg * fs->fs_ipg, wkhd); 2511 bdwrite(bp); 2512 return (0); 2513} 2514 2515/* 2516 * Check to see if a file is free. 2517 */ 2518int 2519ffs_checkfreefile(fs, devvp, ino) 2520 struct fs *fs; 2521 struct vnode *devvp; 2522 ino_t ino; 2523{ 2524 struct cg *cgp; 2525 struct buf *bp; 2526 ufs2_daddr_t cgbno; 2527 int ret; 2528 u_int cg; 2529 u_int8_t *inosused; 2530 2531 cg = ino_to_cg(fs, ino); 2532 if (devvp->v_type == VREG) { 2533 /* devvp is a snapshot */ 2534 cgbno = fragstoblks(fs, cgtod(fs, cg)); 2535 } else { 2536 /* devvp is a normal disk device */ 2537 cgbno = fsbtodb(fs, cgtod(fs, cg)); 2538 } 2539 if (ino >= fs->fs_ipg * fs->fs_ncg) 2540 return (1); 2541 if (bread(devvp, cgbno, (int)fs->fs_cgsize, NOCRED, &bp)) { 2542 brelse(bp); 2543 return (1); 2544 } 2545 cgp = (struct cg *)bp->b_data; 2546 if (!cg_chkmagic(cgp)) { 2547 brelse(bp); 2548 return (1); 2549 } 2550 inosused = cg_inosused(cgp); 2551 ino %= fs->fs_ipg; 2552 ret = isclr(inosused, ino); 2553 brelse(bp); 2554 return (ret); 2555} 2556 2557/* 2558 * Find a block of the specified size in the specified cylinder group. 2559 * 2560 * It is a panic if a request is made to find a block if none are 2561 * available. 
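 *
 * The search is two level: scanc() first scans whole bytes of the
 * free map, classifying each byte's fragment pattern through fragtbl,
 * until it finds a byte that can satisfy the request; the
 * around/inside masks then sift that byte's bits to locate the exact
 * starting fragment.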
2562 */ 2563static ufs1_daddr_t 2564ffs_mapsearch(fs, cgp, bpref, allocsiz) 2565	struct fs *fs; 2566	struct cg *cgp; 2567	ufs2_daddr_t bpref; 2568	int allocsiz; 2569{ 2570	ufs1_daddr_t bno; 2571	int start, len, loc, i; 2572	int blk, field, subfield, pos; 2573	u_int8_t *blksfree; 2574 2575	/* 2576	 * find the fragment by searching through the free block 2577	 * map for an appropriate bit pattern 2578	 */ 2579	if (bpref) 2580		start = dtogd(fs, bpref) / NBBY; 2581	else 2582		start = cgp->cg_frotor / NBBY; 2583	blksfree = cg_blksfree(cgp); 2584	len = howmany(fs->fs_fpg, NBBY) - start; 2585	loc = scanc((u_int)len, (u_char *)&blksfree[start], 2586	    fragtbl[fs->fs_frag], 2587	    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 2588	if (loc == 0) { 2589		len = start + 1; 2590		start = 0; 2591		loc = scanc((u_int)len, (u_char *)&blksfree[0], 2592		    fragtbl[fs->fs_frag], 2593		    (u_char)(1 << (allocsiz - 1 + (fs->fs_frag % NBBY)))); 2594		if (loc == 0) { 2595			printf("start = %d, len = %d, fs = %s\n", 2596			    start, len, fs->fs_fsmnt); 2597			panic("ffs_mapsearch: map corrupted"); 2598			/* NOTREACHED */ 2599		} 2600	} 2601	bno = (start + len - loc) * NBBY; 2602	cgp->cg_frotor = bno; 2603	/* 2604	 * found the byte in the map 2605	 * sift through the bits to find the selected frag 2606	 */ 2607	for (i = bno + NBBY; bno < i; bno += fs->fs_frag) { 2608		blk = blkmap(fs, blksfree, bno); 2609		blk <<= 1; 2610		field = around[allocsiz]; 2611		subfield = inside[allocsiz]; 2612		for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) { 2613			if ((blk & field) == subfield) 2614				return (bno + pos); 2615			field <<= 1; 2616			subfield <<= 1; 2617		} 2618	} 2619	printf("bno = %lu, fs = %s\n", (u_long)bno, fs->fs_fsmnt); 2620	panic("ffs_mapsearch: block not in map"); 2621	return (-1); 2622} 2623 2624/* 2625 * Fserr prints the name of a filesystem with an error diagnostic. 2626 * 2627 * The form of the error message is: 2628 *	fs: error message 2629 */ 2630void 2631ffs_fserr(fs, inum, cp) 2632	struct fs *fs; 2633	ino_t inum; 2634	char *cp; 2635{ 2636	struct thread *td = curthread;	/* XXX */ 2637	struct proc *p = td->td_proc; 2638 2639	log(LOG_ERR, "pid %d (%s), uid %d inumber %ju on %s: %s\n", 2640	    p->p_pid, p->p_comm, td->td_ucred->cr_uid, (uintmax_t)inum, 2641	    fs->fs_fsmnt, cp); 2642} 2643 2644/* 2645 * This function provides the capability for the fsck program to 2646 * update an active filesystem. Fourteen operations are provided: 2647 * 2648 * adjrefcnt(inode, amt) - adjusts the reference count on the 2649 *	specified inode by the specified amount. Under normal 2650 *	operation the count should always go down. Decrementing 2651 *	the count to zero will cause the inode to be freed. 2652 * adjblkcnt(inode, amt) - adjust the number of blocks used by the 2653 *	inode by the specified amount. 2654 * adjndir, adjnbfree, adjnifree, adjnffree, adjnumclusters(amt) - 2655 *	adjust the superblock summary. 2656 * freedirs(inode, count) - directory inodes [inode..inode + count - 1] 2657 *	are marked as free. Inodes should never have to be marked 2658 *	as in use. 2659 * freefiles(inode, count) - file inodes [inode..inode + count - 1] 2660 *	are marked as free. Inodes should never have to be marked 2661 *	as in use. 2662 * freeblks(blockno, size) - blocks [blockno..blockno + size - 1] 2663 *	are marked as free. Blocks should never have to be marked 2664 *	as in use. 2665 * setflags(flags, set/clear) - the fs_flags field has the specified 2666 *	flags set (second parameter +1) or cleared (second parameter -1).
2667 * setcwd(dirinode) - set the current directory to dirinode in the 2668 * filesystem associated with the snapshot. 2669 * setdotdot(oldvalue, newvalue) - Verify that the inode number for ".." 2670 * in the current directory is oldvalue then change it to newvalue. 2671 * unlink(nameptr, oldvalue) - Verify that the inode number associated 2672 * with nameptr in the current directory is oldvalue then unlink it. 2673 * 2674 * The following functions may only be used on a quiescent filesystem 2675 * by the soft updates journal. They are not safe to be run on an active 2676 * filesystem. 2677 * 2678 * setinode(inode, dip) - the specified disk inode is replaced with the 2679 * contents pointed to by dip. 2680 * setbufoutput(fd, flags) - output associated with the specified file 2681 * descriptor (which must reference the character device supporting 2682 * the filesystem) switches from using physio to running through the 2683 * buffer cache when flags is set to 1. The descriptor reverts to 2684 * physio for output when flags is set to zero. 2685 */ 2686 2687static int sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS); 2688 2689SYSCTL_PROC(_vfs_ffs, FFS_ADJ_REFCNT, adjrefcnt, CTLFLAG_WR|CTLTYPE_STRUCT, 2690 0, 0, sysctl_ffs_fsck, "S,fsck", "Adjust Inode Reference Count"); 2691 2692static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_BLKCNT, adjblkcnt, CTLFLAG_WR, 2693 sysctl_ffs_fsck, "Adjust Inode Used Blocks Count"); 2694 2695static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NDIR, adjndir, CTLFLAG_WR, 2696 sysctl_ffs_fsck, "Adjust number of directories"); 2697 2698static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NBFREE, adjnbfree, CTLFLAG_WR, 2699 sysctl_ffs_fsck, "Adjust number of free blocks"); 2700 2701static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NIFREE, adjnifree, CTLFLAG_WR, 2702 sysctl_ffs_fsck, "Adjust number of free inodes"); 2703 2704static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NFFREE, adjnffree, CTLFLAG_WR, 2705 sysctl_ffs_fsck, "Adjust number of free frags"); 2706 2707static SYSCTL_NODE(_vfs_ffs, FFS_ADJ_NUMCLUSTERS, adjnumclusters, CTLFLAG_WR, 2708 sysctl_ffs_fsck, "Adjust number of free clusters"); 2709 2710static SYSCTL_NODE(_vfs_ffs, FFS_DIR_FREE, freedirs, CTLFLAG_WR, 2711 sysctl_ffs_fsck, "Free Range of Directory Inodes"); 2712 2713static SYSCTL_NODE(_vfs_ffs, FFS_FILE_FREE, freefiles, CTLFLAG_WR, 2714 sysctl_ffs_fsck, "Free Range of File Inodes"); 2715 2716static SYSCTL_NODE(_vfs_ffs, FFS_BLK_FREE, freeblks, CTLFLAG_WR, 2717 sysctl_ffs_fsck, "Free Range of Blocks"); 2718 2719static SYSCTL_NODE(_vfs_ffs, FFS_SET_FLAGS, setflags, CTLFLAG_WR, 2720 sysctl_ffs_fsck, "Change Filesystem Flags"); 2721 2722static SYSCTL_NODE(_vfs_ffs, FFS_SET_CWD, setcwd, CTLFLAG_WR, 2723 sysctl_ffs_fsck, "Set Current Working Directory"); 2724 2725static SYSCTL_NODE(_vfs_ffs, FFS_SET_DOTDOT, setdotdot, CTLFLAG_WR, 2726 sysctl_ffs_fsck, "Change Value of .. 
Entry"); 2727 2728static SYSCTL_NODE(_vfs_ffs, FFS_UNLINK, unlink, CTLFLAG_WR, 2729 sysctl_ffs_fsck, "Unlink a Duplicate Name"); 2730 2731static SYSCTL_NODE(_vfs_ffs, FFS_SET_INODE, setinode, CTLFLAG_WR, 2732 sysctl_ffs_fsck, "Update an On-Disk Inode"); 2733 2734static SYSCTL_NODE(_vfs_ffs, FFS_SET_BUFOUTPUT, setbufoutput, CTLFLAG_WR, 2735 sysctl_ffs_fsck, "Set Buffered Writing for Descriptor"); 2736 2737#define DEBUG 1 2738#ifdef DEBUG 2739static int fsckcmds = 0; 2740SYSCTL_INT(_debug, OID_AUTO, fsckcmds, CTLFLAG_RW, &fsckcmds, 0, ""); 2741#endif /* DEBUG */ 2742 2743static int buffered_write(struct file *, struct uio *, struct ucred *, 2744 int, struct thread *); 2745 2746static int 2747sysctl_ffs_fsck(SYSCTL_HANDLER_ARGS) 2748{ 2749 struct thread *td = curthread; 2750 struct fsck_cmd cmd; 2751 struct ufsmount *ump; 2752 struct vnode *vp, *vpold, *dvp, *fdvp; 2753 struct inode *ip, *dp; 2754 struct mount *mp; 2755 struct fs *fs; 2756 ufs2_daddr_t blkno; 2757 long blkcnt, blksize; 2758 struct filedesc *fdp; 2759 struct file *fp, *vfp; 2760 cap_rights_t rights; 2761 int filetype, error; 2762 static struct fileops *origops, bufferedops; 2763 2764 if (req->newlen > sizeof cmd) 2765 return (EBADRPC); 2766 if ((error = SYSCTL_IN(req, &cmd, sizeof cmd)) != 0) 2767 return (error); 2768 if (cmd.version != FFS_CMD_VERSION) 2769 return (ERPCMISMATCH); 2770 if ((error = getvnode(td->td_proc->p_fd, cmd.handle, 2771 cap_rights_init(&rights, CAP_FSCK), &fp)) != 0) 2772 return (error); 2773 vp = fp->f_data; 2774 if (vp->v_type != VREG && vp->v_type != VDIR) { 2775 fdrop(fp, td); 2776 return (EINVAL); 2777 } 2778 vn_start_write(vp, &mp, V_WAIT); 2779 if (mp == NULL || 2780 strncmp(mp->mnt_stat.f_fstypename, "ufs", MFSNAMELEN)) { 2781 vn_finished_write(mp); 2782 fdrop(fp, td); 2783 return (EINVAL); 2784 } 2785 ump = VFSTOUFS(mp); 2786 if ((mp->mnt_flag & MNT_RDONLY) && 2787 ump->um_fsckpid != td->td_proc->p_pid) { 2788 vn_finished_write(mp); 2789 fdrop(fp, td); 2790 return (EROFS); 2791 } 2792 fs = ump->um_fs; 2793 filetype = IFREG; 2794 2795 switch (oidp->oid_number) { 2796 2797 case FFS_SET_FLAGS: 2798#ifdef DEBUG 2799 if (fsckcmds) 2800 printf("%s: %s flags\n", mp->mnt_stat.f_mntonname, 2801 cmd.size > 0 ? 
"set" : "clear"); 2802#endif /* DEBUG */ 2803 if (cmd.size > 0) 2804 fs->fs_flags |= (long)cmd.value; 2805 else 2806 fs->fs_flags &= ~(long)cmd.value; 2807 break; 2808 2809 case FFS_ADJ_REFCNT: 2810#ifdef DEBUG 2811 if (fsckcmds) { 2812 printf("%s: adjust inode %jd link count by %jd\n", 2813 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2814 (intmax_t)cmd.size); 2815 } 2816#endif /* DEBUG */ 2817 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2818 break; 2819 ip = VTOI(vp); 2820 ip->i_nlink += cmd.size; 2821 DIP_SET(ip, i_nlink, ip->i_nlink); 2822 ip->i_effnlink += cmd.size; 2823 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2824 error = ffs_update(vp, 1); 2825 if (DOINGSOFTDEP(vp)) 2826 softdep_change_linkcnt(ip); 2827 vput(vp); 2828 break; 2829 2830 case FFS_ADJ_BLKCNT: 2831#ifdef DEBUG 2832 if (fsckcmds) { 2833 printf("%s: adjust inode %jd block count by %jd\n", 2834 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2835 (intmax_t)cmd.size); 2836 } 2837#endif /* DEBUG */ 2838 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 2839 break; 2840 ip = VTOI(vp); 2841 DIP_SET(ip, i_blocks, DIP(ip, i_blocks) + cmd.size); 2842 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 2843 error = ffs_update(vp, 1); 2844 vput(vp); 2845 break; 2846 2847 case FFS_DIR_FREE: 2848 filetype = IFDIR; 2849 /* fall through */ 2850 2851 case FFS_FILE_FREE: 2852#ifdef DEBUG 2853 if (fsckcmds) { 2854 if (cmd.size == 1) 2855 printf("%s: free %s inode %ju\n", 2856 mp->mnt_stat.f_mntonname, 2857 filetype == IFDIR ? "directory" : "file", 2858 (uintmax_t)cmd.value); 2859 else 2860 printf("%s: free %s inodes %ju-%ju\n", 2861 mp->mnt_stat.f_mntonname, 2862 filetype == IFDIR ? "directory" : "file", 2863 (uintmax_t)cmd.value, 2864 (uintmax_t)(cmd.value + cmd.size - 1)); 2865 } 2866#endif /* DEBUG */ 2867 while (cmd.size > 0) { 2868 if ((error = ffs_freefile(ump, fs, ump->um_devvp, 2869 cmd.value, filetype, NULL))) 2870 break; 2871 cmd.size -= 1; 2872 cmd.value += 1; 2873 } 2874 break; 2875 2876 case FFS_BLK_FREE: 2877#ifdef DEBUG 2878 if (fsckcmds) { 2879 if (cmd.size == 1) 2880 printf("%s: free block %jd\n", 2881 mp->mnt_stat.f_mntonname, 2882 (intmax_t)cmd.value); 2883 else 2884 printf("%s: free blocks %jd-%jd\n", 2885 mp->mnt_stat.f_mntonname, 2886 (intmax_t)cmd.value, 2887 (intmax_t)cmd.value + cmd.size - 1); 2888 } 2889#endif /* DEBUG */ 2890 blkno = cmd.value; 2891 blkcnt = cmd.size; 2892 blksize = fs->fs_frag - (blkno % fs->fs_frag); 2893 while (blkcnt > 0) { 2894 if (blksize > blkcnt) 2895 blksize = blkcnt; 2896 ffs_blkfree(ump, fs, ump->um_devvp, blkno, 2897 blksize * fs->fs_fsize, ROOTINO, VDIR, NULL); 2898 blkno += blksize; 2899 blkcnt -= blksize; 2900 blksize = fs->fs_frag; 2901 } 2902 break; 2903 2904 /* 2905 * Adjust superblock summaries. fsck(8) is expected to 2906 * submit deltas when necessary. 
2907 */ 2908 case FFS_ADJ_NDIR: 2909#ifdef DEBUG 2910 if (fsckcmds) { 2911 printf("%s: adjust number of directories by %jd\n", 2912 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2913 } 2914#endif /* DEBUG */ 2915 fs->fs_cstotal.cs_ndir += cmd.value; 2916 break; 2917 2918 case FFS_ADJ_NBFREE: 2919#ifdef DEBUG 2920 if (fsckcmds) { 2921 printf("%s: adjust number of free blocks by %+jd\n", 2922 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2923 } 2924#endif /* DEBUG */ 2925 fs->fs_cstotal.cs_nbfree += cmd.value; 2926 break; 2927 2928 case FFS_ADJ_NIFREE: 2929#ifdef DEBUG 2930 if (fsckcmds) { 2931 printf("%s: adjust number of free inodes by %+jd\n", 2932 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2933 } 2934#endif /* DEBUG */ 2935 fs->fs_cstotal.cs_nifree += cmd.value; 2936 break; 2937 2938 case FFS_ADJ_NFFREE: 2939#ifdef DEBUG 2940 if (fsckcmds) { 2941 printf("%s: adjust number of free frags by %+jd\n", 2942 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2943 } 2944#endif /* DEBUG */ 2945 fs->fs_cstotal.cs_nffree += cmd.value; 2946 break; 2947 2948 case FFS_ADJ_NUMCLUSTERS: 2949#ifdef DEBUG 2950 if (fsckcmds) { 2951 printf("%s: adjust number of free clusters by %+jd\n", 2952 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2953 } 2954#endif /* DEBUG */ 2955 fs->fs_cstotal.cs_numclusters += cmd.value; 2956 break; 2957 2958 case FFS_SET_CWD: 2959#ifdef DEBUG 2960 if (fsckcmds) { 2961 printf("%s: set current directory to inode %jd\n", 2962 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 2963 } 2964#endif /* DEBUG */ 2965 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_SHARED, &vp))) 2966 break; 2967 AUDIT_ARG_VNODE1(vp); 2968 if ((error = change_dir(vp, td)) != 0) { 2969 vput(vp); 2970 break; 2971 } 2972 VOP_UNLOCK(vp, 0); 2973 fdp = td->td_proc->p_fd; 2974 FILEDESC_XLOCK(fdp); 2975 vpold = fdp->fd_cdir; 2976 fdp->fd_cdir = vp; 2977 FILEDESC_XUNLOCK(fdp); 2978 vrele(vpold); 2979 break; 2980 2981 case FFS_SET_DOTDOT: 2982#ifdef DEBUG 2983 if (fsckcmds) { 2984 printf("%s: change .. in cwd from %jd to %jd\n", 2985 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value, 2986 (intmax_t)cmd.size); 2987 } 2988#endif /* DEBUG */ 2989 /* 2990 * First we have to get and lock the parent directory 2991 * to which ".." points. 2992 */ 2993 error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &fdvp); 2994 if (error) 2995 break; 2996 /* 2997 * Now we get and lock the child directory containing "..". 2998 */ 2999 FILEDESC_SLOCK(td->td_proc->p_fd); 3000 dvp = td->td_proc->p_fd->fd_cdir; 3001 FILEDESC_SUNLOCK(td->td_proc->p_fd); 3002 if ((error = vget(dvp, LK_EXCLUSIVE, td)) != 0) { 3003 vput(fdvp); 3004 break; 3005 } 3006 dp = VTOI(dvp); 3007 dp->i_offset = 12; /* XXX mastertemplate.dot_reclen */ 3008 error = ufs_dirrewrite(dp, VTOI(fdvp), (ino_t)cmd.size, 3009 DT_DIR, 0); 3010 cache_purge(fdvp); 3011 cache_purge(dvp); 3012 vput(dvp); 3013 vput(fdvp); 3014 break; 3015 3016 case FFS_UNLINK: 3017#ifdef DEBUG 3018 if (fsckcmds) { 3019 char buf[32]; 3020 3021 if (copyinstr((char *)(intptr_t)cmd.value, buf,32,NULL)) 3022 strncpy(buf, "Name_too_long", 32); 3023 printf("%s: unlink %s (inode %jd)\n", 3024 mp->mnt_stat.f_mntonname, buf, (intmax_t)cmd.size); 3025 } 3026#endif /* DEBUG */ 3027 /* 3028 * kern_unlinkat will do its own start/finish writes and 3029 * they do not nest, so drop ours here. Setting mp == NULL 3030 * indicates that vn_finished_write is not needed down below. 
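		 * (vn_finished_write() simply returns when handed a
		 * NULL mount pointer, which is what makes this
		 * hand-off safe.)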
3031 */ 3032 vn_finished_write(mp); 3033 mp = NULL; 3034 error = kern_unlinkat(td, AT_FDCWD, (char *)(intptr_t)cmd.value, 3035 UIO_USERSPACE, (ino_t)cmd.size); 3036 break; 3037 3038 case FFS_SET_INODE: 3039 if (ump->um_fsckpid != td->td_proc->p_pid) { 3040 error = EPERM; 3041 break; 3042 } 3043#ifdef DEBUG 3044 if (fsckcmds) { 3045 printf("%s: update inode %jd\n", 3046 mp->mnt_stat.f_mntonname, (intmax_t)cmd.value); 3047 } 3048#endif /* DEBUG */ 3049 if ((error = ffs_vget(mp, (ino_t)cmd.value, LK_EXCLUSIVE, &vp))) 3050 break; 3051 AUDIT_ARG_VNODE1(vp); 3052 ip = VTOI(vp); 3053 if (ip->i_ump->um_fstype == UFS1) 3054 error = copyin((void *)(intptr_t)cmd.size, ip->i_din1, 3055 sizeof(struct ufs1_dinode)); 3056 else 3057 error = copyin((void *)(intptr_t)cmd.size, ip->i_din2, 3058 sizeof(struct ufs2_dinode)); 3059 if (error) { 3060 vput(vp); 3061 break; 3062 } 3063 ip->i_flag |= IN_CHANGE | IN_MODIFIED; 3064 error = ffs_update(vp, 1); 3065 vput(vp); 3066 break; 3067 3068 case FFS_SET_BUFOUTPUT: 3069 if (ump->um_fsckpid != td->td_proc->p_pid) { 3070 error = EPERM; 3071 break; 3072 } 3073 if (VTOI(vp)->i_ump != ump) { 3074 error = EINVAL; 3075 break; 3076 } 3077#ifdef DEBUG 3078 if (fsckcmds) { 3079 printf("%s: %s buffered output for descriptor %jd\n", 3080 mp->mnt_stat.f_mntonname, 3081 cmd.size == 1 ? "enable" : "disable", 3082 (intmax_t)cmd.value); 3083 } 3084#endif /* DEBUG */ 3085 if ((error = getvnode(td->td_proc->p_fd, cmd.value, 3086 cap_rights_init(&rights, CAP_FSCK), &vfp)) != 0) 3087 break; 3088 if (vfp->f_vnode->v_type != VCHR) { 3089 fdrop(vfp, td); 3090 error = EINVAL; 3091 break; 3092 } 3093 if (origops == NULL) { 3094 origops = vfp->f_ops; 3095 bcopy((void *)origops, (void *)&bufferedops, 3096 sizeof(bufferedops)); 3097 bufferedops.fo_write = buffered_write; 3098 } 3099 if (cmd.size == 1) 3100 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops, 3101 (uintptr_t)&bufferedops); 3102 else 3103 atomic_store_rel_ptr((volatile uintptr_t *)&vfp->f_ops, 3104 (uintptr_t)origops); 3105 fdrop(vfp, td); 3106 break; 3107 3108 default: 3109#ifdef DEBUG 3110 if (fsckcmds) { 3111 printf("Invalid request %d from fsck\n", 3112 oidp->oid_number); 3113 } 3114#endif /* DEBUG */ 3115 error = EINVAL; 3116 break; 3117 3118 } 3119 fdrop(fp, td); 3120 vn_finished_write(mp); 3121 return (error); 3122} 3123 3124/* 3125 * Function to switch a descriptor to use the buffer cache to stage 3126 * its I/O. This is needed so that writes to the filesystem device 3127 * will give snapshots a chance to copy modified blocks for which it 3128 * needs to retain copies. 3129 */ 3130static int 3131buffered_write(fp, uio, active_cred, flags, td) 3132 struct file *fp; 3133 struct uio *uio; 3134 struct ucred *active_cred; 3135 int flags; 3136 struct thread *td; 3137{ 3138 struct vnode *devvp, *vp; 3139 struct inode *ip; 3140 struct buf *bp; 3141 struct fs *fs; 3142 struct filedesc *fdp; 3143 int error; 3144 daddr_t lbn; 3145 3146 /* 3147 * The devvp is associated with the /dev filesystem. To discover 3148 * the filesystem with which the device is associated, we depend 3149 * on the application setting the current directory to a location 3150 * within the filesystem being written. Yes, this is an ugly hack. 
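	 * The intended sequence appears to be that the fsck process
	 * first issues the setcwd operation above to move into the
	 * target filesystem, then enables buffered output on the
	 * device descriptor.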
3151 */ 3152 devvp = fp->f_vnode; 3153 if (!vn_isdisk(devvp, NULL)) 3154 return (EINVAL); 3155 fdp = td->td_proc->p_fd; 3156 FILEDESC_SLOCK(fdp); 3157 vp = fdp->fd_cdir; 3158 vref(vp); 3159 FILEDESC_SUNLOCK(fdp); 3160 vn_lock(vp, LK_SHARED | LK_RETRY); 3161 /* 3162 * Check that the current directory vnode indeed belongs to 3163 * UFS before trying to dereference UFS-specific v_data fields. 3164 */ 3165 if (vp->v_op != &ffs_vnodeops1 && vp->v_op != &ffs_vnodeops2) { 3166 vput(vp); 3167 return (EINVAL); 3168 } 3169 ip = VTOI(vp); 3170 if (ip->i_devvp != devvp) { 3171 vput(vp); 3172 return (EINVAL); 3173 } 3174 fs = ip->i_fs; 3175 vput(vp); 3176 foffset_lock_uio(fp, uio, flags); 3177 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY); 3178#ifdef DEBUG 3179 if (fsckcmds) { 3180 printf("%s: buffered write for block %jd\n", 3181 fs->fs_fsmnt, (intmax_t)btodb(uio->uio_offset)); 3182 } 3183#endif /* DEBUG */ 3184 /* 3185 * All I/O must be contained within a filesystem block, start on 3186 * a fragment boundary, and be a multiple of fragments in length. 3187 */ 3188 if (uio->uio_resid > fs->fs_bsize - (uio->uio_offset % fs->fs_bsize) || 3189 fragoff(fs, uio->uio_offset) != 0 || 3190 fragoff(fs, uio->uio_resid) != 0) { 3191 error = EINVAL; 3192 goto out; 3193 } 3194 lbn = numfrags(fs, uio->uio_offset); 3195 bp = getblk(devvp, lbn, uio->uio_resid, 0, 0, 0); 3196 bp->b_flags |= B_RELBUF; 3197 if ((error = uiomove((char *)bp->b_data, uio->uio_resid, uio)) != 0) { 3198 brelse(bp); 3199 goto out; 3200 } 3201 error = bwrite(bp); 3202out: 3203 VOP_UNLOCK(devvp, 0); 3204 foffset_unlock_uio(fp, uio, flags | FOF_NEXTOFF); 3205 return (error); 3206} 3207