vfs_bio.c revision 40726
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.181 1998/10/28 13:36:59 dg Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

struct bio_ops bioops;		/* I/O operation notification */

#if 0	/* replaced by sched_sync */
static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
#endif

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers;
static int lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
struct bqueues bufqueues[BUFFER_QUEUES] = {0};

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Remove the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 8 + 20;
	lodirtybuffers = nbuf / 16 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;
	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		return (biowait(bp));
	}
	return (0);
}
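
/*
 * Example (an illustrative sketch, not part of the original file): the
 * usual calling convention for bread() is to release the buffer with
 * brelse() on both the error and the success path, since a buffer is
 * returned even when the I/O fails.  The names "vp", "lbn" and "bsize"
 * below are hypothetical caller variables.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 */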
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(vp, bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(vp, rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags, s;
	struct vnode *vp;
	struct mount *mp;


	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	oldflags = bp->b_flags;

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0)
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	s = splbio();
	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	splx(s);
	VOP_STRATEGY(bp->b_vp, bp);

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL)
			if ((oldflags & B_ASYNC) == 0)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
	}

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);
		brelse(bp);
		return (rtval);
	}
	return (0);
}

__inline void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}
442 */ 443void 444bdwrite(struct buf * bp) 445{ 446 struct vnode *vp; 447 448#if !defined(MAX_PERF) 449 if ((bp->b_flags & B_BUSY) == 0) { 450 panic("bdwrite: buffer is not busy"); 451 } 452#endif 453 454 if (bp->b_flags & B_INVAL) { 455 brelse(bp); 456 return; 457 } 458 bp->b_flags &= ~(B_READ|B_RELBUF); 459 if ((bp->b_flags & B_DELWRI) == 0) { 460 bp->b_flags |= B_DONE | B_DELWRI; 461 reassignbuf(bp, bp->b_vp); 462 ++numdirtybuffers; 463 } 464 465 /* 466 * This bmap keeps the system from needing to do the bmap later, 467 * perhaps when the system is attempting to do a sync. Since it 468 * is likely that the indirect block -- or whatever other datastructure 469 * that the filesystem needs is still in memory now, it is a good 470 * thing to do this. Note also, that if the pageout daemon is 471 * requesting a sync -- there might not be enough memory to do 472 * the bmap then... So, this is important to do. 473 */ 474 if (bp->b_lblkno == bp->b_blkno) { 475 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 476 } 477 478 /* 479 * Set the *dirty* buffer range based upon the VM system dirty pages. 480 */ 481 vfs_setdirty(bp); 482 483 /* 484 * We need to do this here to satisfy the vnode_pager and the 485 * pageout daemon, so that it thinks that the pages have been 486 * "cleaned". Note that since the pages are in a delayed write 487 * buffer -- the VFS layer "will" see that the pages get written 488 * out on the next sync, or perhaps the cluster will be completed. 489 */ 490 vfs_clean_pages(bp); 491 bqrelse(bp); 492 493 /* 494 * XXX The soft dependency code is not prepared to 495 * have I/O done when a bdwrite is requested. For 496 * now we just let the write be delayed if it is 497 * requested by the soft dependency code. 498 */ 499 if ((vp = bp->b_vp) && 500 (vp->v_type == VBLK && vp->v_specmountpoint && 501 (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) || 502 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))) 503 return; 504 505 if (numdirtybuffers >= hidirtybuffers) 506 flushdirtybuffers(0, 0); 507 508 return; 509} 510 511 512/* 513 * Same as first half of bdwrite, mark buffer dirty, but do not release it. 514 * Check how this compares with vfs_setdirty(); XXX [JRE] 515 */ 516void 517bdirty(bp) 518 struct buf *bp; 519{ 520 521 bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */ 522 if ((bp->b_flags & B_DELWRI) == 0) { 523 bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */ 524 reassignbuf(bp, bp->b_vp); 525 ++numdirtybuffers; 526 } 527} 528 529/* 530 * Asynchronous write. 531 * Start output on a buffer, but do not wait for it to complete. 532 * The buffer is released when the output completes. 533 */ 534void 535bawrite(struct buf * bp) 536{ 537 bp->b_flags |= B_ASYNC; 538 (void) VOP_BWRITE(bp); 539} 540 541/* 542 * Ordered write. 543 * Start output on a buffer, and flag it so that the device will write 544 * it in the order it was queued. The buffer is released when the output 545 * completes. 546 */ 547int 548bowrite(struct buf * bp) 549{ 550 bp->b_flags |= B_ORDERED|B_ASYNC; 551 return (VOP_BWRITE(bp)); 552} 553 554/* 555 * Release a buffer. 556 */ 557void 558brelse(struct buf * bp) 559{ 560 int s; 561 562 if (bp->b_flags & B_CLUSTER) { 563 relpbuf(bp); 564 return; 565 } 566 567 s = splbio(); 568 569 /* anyone need this block? 
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_FREEBUF)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE | B_FREEBUF);
		if ((bp->b_flags & B_VMIO) == 0) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			if (bp->b_vp)
				brelvp(bp);
		}
	}

	/*
	 * We must clear B_RELBUF if B_DELWRI is set.  If vfs_vmio_release()
	 * is called with B_DELWRI set, the underlying pages may wind up
	 * getting freed causing a previous write (bdwrite()) to get 'lost'
	 * because pages associated with a B_DELWRI bp are marked clean.
	 *
	 * We still allow the B_INVAL case to call vfs_vmio_release(), even
	 * if B_DELWRI is set.
	 */

	if (bp->b_flags & B_DELWRI)
		bp->b_flags &= ~B_RELBUF;

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {

		int i, j, resid;
		vm_page_t m;
		off_t foff;
		vm_pindex_t poff;
		vm_object_t obj;
		struct vnode *vp;

		vp = bp->b_vp;

		resid = bp->b_bufsize;
		foff = bp->b_offset;

		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			vm_page_flag_clear(m, PG_ZERO);
			if (m == bogus_page) {

				obj = (vm_object_t) vp->v_object;
				poff = OFF_TO_IDX(bp->b_offset);

				for (j = i; j < bp->b_npages; j++) {
					m = bp->b_pages[j];
					if (m == bogus_page) {
						m = vm_page_lookup(obj, poff + j);
#if !defined(MAX_PERF)
						if (!m) {
							panic("brelse: page missing\n");
						}
#endif
						bp->b_pages[j] = m;
					}
				}

				if ((bp->b_flags & B_INVAL) == 0) {
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			if (bp->b_flags & (B_NOCACHE|B_ERROR)) {
				int poffset = foff & PAGE_MASK;
				int presid = resid > (PAGE_SIZE - poffset) ?
				    (PAGE_SIZE - poffset) : resid;
				vm_page_set_invalid(m, poffset, presid);
			}
			resid -= PAGE_SIZE;
		}

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	} else if (bp->b_flags & B_VMIO) {

		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);

	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
		B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m, (bp->b_flags & B_ASYNC) == 0 ? 0 : 1);

		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
803 */ 804 if ((m->flags & PG_BUSY) || (m->busy != 0)) 805 continue; 806 807 if (m->wire_count == 0) { 808 809 vm_page_flag_clear(m, PG_ZERO); 810 /* 811 * If this is an async free -- we cannot place 812 * pages onto the cache queue. If it is an 813 * async free, then we don't modify any queues. 814 * This is probably in error (for perf reasons), 815 * and we will eventually need to build 816 * a more complete infrastructure to support I/O 817 * rundown. 818 */ 819 if ((bp->b_flags & B_ASYNC) == 0) { 820 821 /* 822 * In the case of sync buffer frees, we can do pretty much 823 * anything to any of the memory queues. Specifically, 824 * the cache queue is okay to be modified. 825 */ 826 if (m->valid) { 827 if(m->dirty == 0) 828 vm_page_test_dirty(m); 829 /* 830 * this keeps pressure off of the process memory 831 */ 832 if (m->dirty == 0 && m->hold_count == 0) 833 vm_page_cache(m); 834 } else if (m->hold_count == 0) { 835 vm_page_busy(m); 836 vm_page_protect(m, VM_PROT_NONE); 837 vm_page_free(m); 838 } 839 } else { 840 /* 841 * If async, then at least we clear the 842 * act_count. 843 */ 844 m->act_count = 0; 845 } 846 } 847 } 848 bufspace -= bp->b_bufsize; 849 vmiospace -= bp->b_bufsize; 850 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 851 bp->b_npages = 0; 852 bp->b_bufsize = 0; 853 bp->b_flags &= ~B_VMIO; 854 if (bp->b_vp) 855 brelvp(bp); 856} 857 858/* 859 * Check to see if a block is currently memory resident. 860 */ 861struct buf * 862gbincore(struct vnode * vp, daddr_t blkno) 863{ 864 struct buf *bp; 865 struct bufhashhdr *bh; 866 867 bh = BUFHASH(vp, blkno); 868 bp = bh->lh_first; 869 870 /* Search hash chain */ 871 while (bp != NULL) { 872 /* hit */ 873 if (bp->b_vp == vp && bp->b_lblkno == blkno && 874 (bp->b_flags & B_INVAL) == 0) { 875 break; 876 } 877 bp = bp->b_hash.le_next; 878 } 879 return (bp); 880} 881 882/* 883 * this routine implements clustered async writes for 884 * clearing out B_DELWRI buffers... This is much better 885 * than the old way of writing only one buffer at a time. 886 */ 887int 888vfs_bio_awrite(struct buf * bp) 889{ 890 int i; 891 daddr_t lblkno = bp->b_lblkno; 892 struct vnode *vp = bp->b_vp; 893 int s; 894 int ncl; 895 struct buf *bpa; 896 int nwritten; 897 int size; 898 int maxcl; 899 900 s = splbio(); 901 /* 902 * right now we support clustered writing only to regular files 903 */ 904 if ((vp->v_type == VREG) && 905 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 906 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 907 908 size = vp->v_mount->mnt_stat.f_iosize; 909 maxcl = MAXPHYS / size; 910 911 for (i = 1; i < maxcl; i++) { 912 if ((bpa = gbincore(vp, lblkno + i)) && 913 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) == 914 (B_DELWRI | B_CLUSTEROK)) && 915 (bpa->b_bufsize == size)) { 916 if ((bpa->b_blkno == bpa->b_lblkno) || 917 (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT))) 918 break; 919 } else { 920 break; 921 } 922 } 923 ncl = i; 924 /* 925 * this is a possible cluster write 926 */ 927 if (ncl != 1) { 928 nwritten = cluster_wbuild(vp, size, lblkno, ncl); 929 splx(s); 930 return nwritten; 931 } 932 } 933 934 bremfree(bp); 935 bp->b_flags |= B_BUSY | B_ASYNC; 936 937 splx(s); 938 /* 939 * default (old) behavior, writing out only one block 940 */ 941 nwritten = bp->b_bufsize; 942 (void) VOP_BWRITE(bp); 943 return nwritten; 944} 945 946 947/* 948 * Find a buffer header which is available for use. 
949 */ 950static struct buf * 951getnewbuf(struct vnode *vp, daddr_t blkno, 952 int slpflag, int slptimeo, int size, int maxsize) 953{ 954 struct buf *bp, *bp1; 955 int nbyteswritten = 0; 956 vm_offset_t addr; 957 static int writerecursion = 0; 958 959start: 960 if (bufspace >= maxbufspace) 961 goto trytofreespace; 962 963 /* can we constitute a new buffer? */ 964 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) { 965#if !defined(MAX_PERF) 966 if (bp->b_qindex != QUEUE_EMPTY) 967 panic("getnewbuf: inconsistent EMPTY queue, qindex=%d", 968 bp->b_qindex); 969#endif 970 bp->b_flags |= B_BUSY; 971 bremfree(bp); 972 goto fillbuf; 973 } 974trytofreespace: 975 /* 976 * We keep the file I/O from hogging metadata I/O 977 * This is desirable because file data is cached in the 978 * VM/Buffer cache even if a buffer is freed. 979 */ 980 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) { 981#if !defined(MAX_PERF) 982 if (bp->b_qindex != QUEUE_AGE) 983 panic("getnewbuf: inconsistent AGE queue, qindex=%d", 984 bp->b_qindex); 985#endif 986 } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) { 987#if !defined(MAX_PERF) 988 if (bp->b_qindex != QUEUE_LRU) 989 panic("getnewbuf: inconsistent LRU queue, qindex=%d", 990 bp->b_qindex); 991#endif 992 } 993 if (!bp) { 994 /* wait for a free buffer of any kind */ 995 needsbuffer |= VFS_BIO_NEED_ANY; 996 do 997 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf", 998 slptimeo); 999 while (needsbuffer & VFS_BIO_NEED_ANY); 1000 return (0); 1001 } 1002 1003#if defined(DIAGNOSTIC) 1004 if (bp->b_flags & B_BUSY) { 1005 panic("getnewbuf: busy buffer on free list\n"); 1006 } 1007#endif 1008 1009 /* 1010 * We are fairly aggressive about freeing VMIO buffers, but since 1011 * the buffering is intact without buffer headers, there is not 1012 * much loss. We gain by maintaining non-VMIOed metadata in buffers. 1013 */ 1014 if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) { 1015 if ((bp->b_flags & B_VMIO) == 0 || 1016 (vmiospace < maxvmiobufspace)) { 1017 --bp->b_usecount; 1018 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 1019 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 1020 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1021 goto start; 1022 } 1023 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1024 } 1025 } 1026 1027 1028 /* if we are a delayed write, convert to an async write */ 1029 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 1030 1031 /* 1032 * If our delayed write is likely to be used soon, then 1033 * recycle back onto the LRU queue. 1034 */ 1035 if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) && 1036 (bp->b_lblkno >= blkno) && (maxsize > 0)) { 1037 1038 if (bp->b_usecount > 0) { 1039 if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) { 1040 1041 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 1042 1043 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 1044 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1045 bp->b_usecount--; 1046 goto start; 1047 } 1048 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1049 } 1050 } 1051 } 1052 1053 /* 1054 * Certain layered filesystems can recursively re-enter the vfs_bio 1055 * code, due to delayed writes. This helps keep the system from 1056 * deadlocking. 
1057 */ 1058 if (writerecursion > 0) { 1059 if (writerecursion > 5) { 1060 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1061 while (bp) { 1062 if ((bp->b_flags & B_DELWRI) == 0) 1063 break; 1064 bp = TAILQ_NEXT(bp, b_freelist); 1065 } 1066 if (bp == NULL) { 1067 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1068 while (bp) { 1069 if ((bp->b_flags & B_DELWRI) == 0) 1070 break; 1071 bp = TAILQ_NEXT(bp, b_freelist); 1072 } 1073 } 1074 if (bp == NULL) 1075 panic("getnewbuf: cannot get buffer, infinite recursion failure"); 1076 } else { 1077 bremfree(bp); 1078 bp->b_flags |= B_BUSY | B_AGE | B_ASYNC; 1079 nbyteswritten += bp->b_bufsize; 1080 ++writerecursion; 1081 VOP_BWRITE(bp); 1082 --writerecursion; 1083 if (!slpflag && !slptimeo) { 1084 return (0); 1085 } 1086 goto start; 1087 } 1088 } else { 1089 ++writerecursion; 1090 nbyteswritten += vfs_bio_awrite(bp); 1091 --writerecursion; 1092 if (!slpflag && !slptimeo) { 1093 return (0); 1094 } 1095 goto start; 1096 } 1097 } 1098 1099 if (bp->b_flags & B_WANTED) { 1100 bp->b_flags &= ~B_WANTED; 1101 wakeup(bp); 1102 } 1103 bremfree(bp); 1104 bp->b_flags |= B_BUSY; 1105 1106 if (bp->b_flags & B_VMIO) { 1107 bp->b_flags &= ~B_ASYNC; 1108 vfs_vmio_release(bp); 1109 } 1110 1111 if (bp->b_vp) 1112 brelvp(bp); 1113 1114fillbuf: 1115 1116 /* we are not free, nor do we contain interesting data */ 1117 if (bp->b_rcred != NOCRED) { 1118 crfree(bp->b_rcred); 1119 bp->b_rcred = NOCRED; 1120 } 1121 if (bp->b_wcred != NOCRED) { 1122 crfree(bp->b_wcred); 1123 bp->b_wcred = NOCRED; 1124 } 1125 if (LIST_FIRST(&bp->b_dep) != NULL && 1126 bioops.io_deallocate) 1127 (*bioops.io_deallocate)(bp); 1128 1129 LIST_REMOVE(bp, b_hash); 1130 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1131 if (bp->b_bufsize) { 1132 allocbuf(bp, 0); 1133 } 1134 bp->b_flags = B_BUSY; 1135 bp->b_dev = NODEV; 1136 bp->b_vp = NULL; 1137 bp->b_blkno = bp->b_lblkno = 0; 1138 bp->b_offset = NOOFFSET; 1139 bp->b_iodone = 0; 1140 bp->b_error = 0; 1141 bp->b_resid = 0; 1142 bp->b_bcount = 0; 1143 bp->b_npages = 0; 1144 bp->b_dirtyoff = bp->b_dirtyend = 0; 1145 bp->b_validoff = bp->b_validend = 0; 1146 bp->b_usecount = 5; 1147 /* Here, not kern_physio.c, is where this should be done*/ 1148 LIST_INIT(&bp->b_dep); 1149 1150 maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK; 1151 1152 /* 1153 * we assume that buffer_map is not at address 0 1154 */ 1155 addr = 0; 1156 if (maxsize != bp->b_kvasize) { 1157 bfreekva(bp); 1158 1159findkvaspace: 1160 /* 1161 * See if we have buffer kva space 1162 */ 1163 if (vm_map_findspace(buffer_map, 1164 vm_map_min(buffer_map), maxsize, &addr)) { 1165 if (kvafreespace > 0) { 1166 int totfree = 0, freed; 1167 do { 1168 freed = 0; 1169 for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 1170 bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) { 1171 if (bp1->b_kvasize != 0) { 1172 totfree += bp1->b_kvasize; 1173 freed = bp1->b_kvasize; 1174 bremfree(bp1); 1175 bfreekva(bp1); 1176 brelse(bp1); 1177 break; 1178 } 1179 } 1180 } while (freed); 1181 /* 1182 * if we found free space, then retry with the same buffer. 1183 */ 1184 if (totfree) 1185 goto findkvaspace; 1186 } 1187 bp->b_flags |= B_INVAL; 1188 brelse(bp); 1189 goto trytofreespace; 1190 } 1191 } 1192 1193 /* 1194 * See if we are below are allocated minimum 1195 */ 1196 if (bufspace >= (maxbufspace + nbyteswritten)) { 1197 bp->b_flags |= B_INVAL; 1198 brelse(bp); 1199 goto trytofreespace; 1200 } 1201 1202 /* 1203 * create a map entry for the buffer -- in essence 1204 * reserving the kva space. 
1205 */ 1206 if (addr) { 1207 vm_map_insert(buffer_map, NULL, 0, 1208 addr, addr + maxsize, 1209 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 1210 1211 bp->b_kvabase = (caddr_t) addr; 1212 bp->b_kvasize = maxsize; 1213 } 1214 bp->b_data = bp->b_kvabase; 1215 1216 return (bp); 1217} 1218 1219static void 1220waitfreebuffers(int slpflag, int slptimeo) { 1221 while (numfreebuffers < hifreebuffers) { 1222 flushdirtybuffers(slpflag, slptimeo); 1223 if (numfreebuffers < hifreebuffers) 1224 break; 1225 needsbuffer |= VFS_BIO_NEED_FREE; 1226 if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo)) 1227 break; 1228 } 1229} 1230 1231static void 1232flushdirtybuffers(int slpflag, int slptimeo) { 1233 int s; 1234 static pid_t flushing = 0; 1235 1236 s = splbio(); 1237 1238 if (flushing) { 1239 if (flushing == curproc->p_pid) { 1240 splx(s); 1241 return; 1242 } 1243 while (flushing) { 1244 if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) { 1245 splx(s); 1246 return; 1247 } 1248 } 1249 } 1250 flushing = curproc->p_pid; 1251 1252 while (numdirtybuffers > lodirtybuffers) { 1253 struct buf *bp; 1254 needsbuffer |= VFS_BIO_NEED_LOWLIMIT; 1255 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1256 if (bp == NULL) 1257 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1258 1259 while (bp && ((bp->b_flags & B_DELWRI) == 0)) { 1260 bp = TAILQ_NEXT(bp, b_freelist); 1261 } 1262 1263 if (bp) { 1264 vfs_bio_awrite(bp); 1265 continue; 1266 } 1267 break; 1268 } 1269 1270 flushing = 0; 1271 wakeup(&flushing); 1272 splx(s); 1273} 1274 1275/* 1276 * Check to see if a block is currently memory resident. 1277 */ 1278struct buf * 1279incore(struct vnode * vp, daddr_t blkno) 1280{ 1281 struct buf *bp; 1282 1283 int s = splbio(); 1284 bp = gbincore(vp, blkno); 1285 splx(s); 1286 return (bp); 1287} 1288 1289/* 1290 * Returns true if no I/O is needed to access the 1291 * associated VM object. This is like incore except 1292 * it also hunts around in the VM system for the data. 1293 */ 1294 1295int 1296inmem(struct vnode * vp, daddr_t blkno) 1297{ 1298 vm_object_t obj; 1299 vm_offset_t toff, tinc; 1300 vm_page_t m; 1301 vm_ooffset_t off; 1302 1303 if (incore(vp, blkno)) 1304 return 1; 1305 if (vp->v_mount == NULL) 1306 return 0; 1307 if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0) 1308 return 0; 1309 1310 obj = vp->v_object; 1311 tinc = PAGE_SIZE; 1312 if (tinc > vp->v_mount->mnt_stat.f_iosize) 1313 tinc = vp->v_mount->mnt_stat.f_iosize; 1314 off = blkno * vp->v_mount->mnt_stat.f_iosize; 1315 1316 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 1317 1318 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 1319 if (!m) 1320 return 0; 1321 if (vm_page_is_valid(m, 1322 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 1323 return 0; 1324 } 1325 return 1; 1326} 1327 1328/* 1329 * now we set the dirty range for the buffer -- 1330 * for NFS -- if the file is mapped and pages have 1331 * been written to, let it know. We want the 1332 * entire range of the buffer to be marked dirty if 1333 * any of the pages have been written to for consistancy 1334 * with the b_validoff, b_validend set in the nfs write 1335 * code, and used by the nfs read code. 1336 */ 1337static void 1338vfs_setdirty(struct buf *bp) { 1339 int i; 1340 vm_object_t object; 1341 vm_offset_t boffset, offset; 1342 /* 1343 * We qualify the scan for modified pages on whether the 1344 * object has been flushed yet. The OBJ_WRITEABLE flag 1345 * is not cleared simply by protecting pages off. 
1346 */ 1347 if ((bp->b_flags & B_VMIO) && 1348 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) { 1349 /* 1350 * test the pages to see if they have been modified directly 1351 * by users through the VM system. 1352 */ 1353 for (i = 0; i < bp->b_npages; i++) { 1354 vm_page_flag_clear(bp->b_pages[i], PG_ZERO); 1355 vm_page_test_dirty(bp->b_pages[i]); 1356 } 1357 1358 /* 1359 * scan forwards for the first page modified 1360 */ 1361 for (i = 0; i < bp->b_npages; i++) { 1362 if (bp->b_pages[i]->dirty) { 1363 break; 1364 } 1365 } 1366 boffset = (i << PAGE_SHIFT); 1367 if (boffset < bp->b_dirtyoff) { 1368 bp->b_dirtyoff = boffset; 1369 } 1370 1371 /* 1372 * scan backwards for the last page modified 1373 */ 1374 for (i = bp->b_npages - 1; i >= 0; --i) { 1375 if (bp->b_pages[i]->dirty) { 1376 break; 1377 } 1378 } 1379 boffset = (i + 1); 1380 offset = boffset + bp->b_pages[0]->pindex; 1381 if (offset >= object->size) 1382 boffset = object->size - bp->b_pages[0]->pindex; 1383 if (bp->b_dirtyend < (boffset << PAGE_SHIFT)) 1384 bp->b_dirtyend = (boffset << PAGE_SHIFT); 1385 } 1386} 1387 1388/* 1389 * Get a block given a specified block and offset into a file/device. 1390 */ 1391struct buf * 1392getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1393{ 1394 struct buf *bp; 1395 int i, s; 1396 struct bufhashhdr *bh; 1397 int maxsize; 1398 int checksize; 1399 1400 if (vp->v_mount) { 1401 maxsize = vp->v_mount->mnt_stat.f_iosize; 1402 /* 1403 * This happens on mount points. 1404 */ 1405 if (maxsize < size) 1406 maxsize = size; 1407 } else { 1408 maxsize = size; 1409 } 1410 1411#if !defined(MAX_PERF) 1412 if (size > MAXBSIZE) 1413 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 1414#endif 1415 1416 s = splbio(); 1417loop: 1418 if (numfreebuffers < lofreebuffers) { 1419 waitfreebuffers(slpflag, slptimeo); 1420 } 1421 1422 if ((bp = gbincore(vp, blkno))) { 1423 if (bp->b_flags & B_BUSY) { 1424 1425 bp->b_flags |= B_WANTED; 1426 if (bp->b_usecount < BUF_MAXUSE) 1427 ++bp->b_usecount; 1428 1429 if (!tsleep(bp, 1430 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) { 1431 goto loop; 1432 } 1433 1434 splx(s); 1435 return (struct buf *) NULL; 1436 } 1437 bp->b_flags |= B_BUSY | B_CACHE; 1438 bremfree(bp); 1439 1440 /* 1441 * check for size inconsistancies (note that they shouldn't 1442 * happen but do when filesystems don't handle the size changes 1443 * correctly.) We are conservative on metadata and don't just 1444 * extend the buffer but write (if needed) and re-constitute it. 1445 */ 1446 1447 if (bp->b_bcount != size) { 1448 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) { 1449 allocbuf(bp, size); 1450 } else { 1451 if (bp->b_flags & B_DELWRI) { 1452 bp->b_flags |= B_NOCACHE; 1453 VOP_BWRITE(bp); 1454 } else { 1455 if ((bp->b_flags & B_VMIO) && 1456 (LIST_FIRST(&bp->b_dep) == NULL)) { 1457 bp->b_flags |= B_RELBUF; 1458 brelse(bp); 1459 } else { 1460 bp->b_flags |= B_NOCACHE; 1461 VOP_BWRITE(bp); 1462 } 1463 } 1464 goto loop; 1465 } 1466 } 1467 1468#ifdef DIAGNOSTIC 1469 if (bp->b_offset == NOOFFSET) 1470 panic("getblk: no buffer offset"); 1471#endif 1472 1473 /* 1474 * Check that the constituted buffer really deserves for the 1475 * B_CACHE bit to be set. B_VMIO type buffers might not 1476 * contain fully valid pages. Normal (old-style) buffers 1477 * should be fully valid. 
1478 */ 1479 if (bp->b_flags & B_VMIO) { 1480 checksize = bp->b_bufsize; 1481 for (i = 0; i < bp->b_npages; i++) { 1482 int resid; 1483 int poffset; 1484 poffset = bp->b_offset & PAGE_MASK; 1485 resid = (checksize > (PAGE_SIZE - poffset)) ? 1486 (PAGE_SIZE - poffset) : checksize; 1487 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) { 1488 bp->b_flags &= ~(B_CACHE | B_DONE); 1489 break; 1490 } 1491 checksize -= resid; 1492 } 1493 } 1494 1495 if (bp->b_usecount < BUF_MAXUSE) 1496 ++bp->b_usecount; 1497 splx(s); 1498 return (bp); 1499 } else { 1500 vm_object_t obj; 1501 1502 if ((bp = getnewbuf(vp, blkno, 1503 slpflag, slptimeo, size, maxsize)) == 0) { 1504 if (slpflag || slptimeo) { 1505 splx(s); 1506 return NULL; 1507 } 1508 goto loop; 1509 } 1510 1511 /* 1512 * This code is used to make sure that a buffer is not 1513 * created while the getnewbuf routine is blocked. 1514 * Normally the vnode is locked so this isn't a problem. 1515 * VBLK type I/O requests, however, don't lock the vnode. 1516 */ 1517 if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE && gbincore(vp, blkno)) { 1518 bp->b_flags |= B_INVAL; 1519 brelse(bp); 1520 goto loop; 1521 } 1522 1523 /* 1524 * Insert the buffer into the hash, so that it can 1525 * be found by incore. 1526 */ 1527 bp->b_blkno = bp->b_lblkno = blkno; 1528 1529 if (vp->v_type != VBLK) 1530 bp->b_offset = (off_t) blkno * maxsize; 1531 else 1532 bp->b_offset = (off_t) blkno * DEV_BSIZE; 1533 1534 bgetvp(vp, bp); 1535 LIST_REMOVE(bp, b_hash); 1536 bh = BUFHASH(vp, blkno); 1537 LIST_INSERT_HEAD(bh, bp, b_hash); 1538 1539 if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) { 1540 bp->b_flags |= (B_VMIO | B_CACHE); 1541#if defined(VFS_BIO_DEBUG) 1542 if (vp->v_type != VREG && vp->v_type != VBLK) 1543 printf("getblk: vmioing file type %d???\n", vp->v_type); 1544#endif 1545 } else { 1546 bp->b_flags &= ~B_VMIO; 1547 } 1548 1549 allocbuf(bp, size); 1550 1551 splx(s); 1552 return (bp); 1553 } 1554} 1555 1556/* 1557 * Get an empty, disassociated buffer of given size. 1558 */ 1559struct buf * 1560geteblk(int size) 1561{ 1562 struct buf *bp; 1563 int s; 1564 1565 s = splbio(); 1566 while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0); 1567 splx(s); 1568 allocbuf(bp, size); 1569 bp->b_flags |= B_INVAL; /* b_dep cleared by getnewbuf() */ 1570 return (bp); 1571} 1572 1573 1574/* 1575 * This code constitutes the buffer memory from either anonymous system 1576 * memory (in the case of non-VMIO operations) or from an associated 1577 * VM object (in the case of VMIO operations). 1578 * 1579 * Note that this code is tricky, and has many complications to resolve 1580 * deadlock or inconsistant data situations. Tread lightly!!! 1581 * 1582 * Modify the length of a buffer's underlying buffer storage without 1583 * destroying information (unless, of course the buffer is shrinking). 
1584 */ 1585int 1586allocbuf(struct buf * bp, int size) 1587{ 1588 1589 int s; 1590 int newbsize, mbsize; 1591 int i; 1592 1593#if !defined(MAX_PERF) 1594 if (!(bp->b_flags & B_BUSY)) 1595 panic("allocbuf: buffer not busy"); 1596 1597 if (bp->b_kvasize < size) 1598 panic("allocbuf: buffer too small"); 1599#endif 1600 1601 if ((bp->b_flags & B_VMIO) == 0) { 1602 caddr_t origbuf; 1603 int origbufsize; 1604 /* 1605 * Just get anonymous memory from the kernel 1606 */ 1607 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1608#if !defined(NO_B_MALLOC) 1609 if (bp->b_flags & B_MALLOC) 1610 newbsize = mbsize; 1611 else 1612#endif 1613 newbsize = round_page(size); 1614 1615 if (newbsize < bp->b_bufsize) { 1616#if !defined(NO_B_MALLOC) 1617 /* 1618 * malloced buffers are not shrunk 1619 */ 1620 if (bp->b_flags & B_MALLOC) { 1621 if (newbsize) { 1622 bp->b_bcount = size; 1623 } else { 1624 free(bp->b_data, M_BIOBUF); 1625 bufspace -= bp->b_bufsize; 1626 bufmallocspace -= bp->b_bufsize; 1627 bp->b_data = bp->b_kvabase; 1628 bp->b_bufsize = 0; 1629 bp->b_bcount = 0; 1630 bp->b_flags &= ~B_MALLOC; 1631 } 1632 return 1; 1633 } 1634#endif 1635 vm_hold_free_pages( 1636 bp, 1637 (vm_offset_t) bp->b_data + newbsize, 1638 (vm_offset_t) bp->b_data + bp->b_bufsize); 1639 } else if (newbsize > bp->b_bufsize) { 1640#if !defined(NO_B_MALLOC) 1641 /* 1642 * We only use malloced memory on the first allocation. 1643 * and revert to page-allocated memory when the buffer grows. 1644 */ 1645 if ( (bufmallocspace < maxbufmallocspace) && 1646 (bp->b_bufsize == 0) && 1647 (mbsize <= PAGE_SIZE/2)) { 1648 1649 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 1650 bp->b_bufsize = mbsize; 1651 bp->b_bcount = size; 1652 bp->b_flags |= B_MALLOC; 1653 bufspace += mbsize; 1654 bufmallocspace += mbsize; 1655 return 1; 1656 } 1657#endif 1658 origbuf = NULL; 1659 origbufsize = 0; 1660#if !defined(NO_B_MALLOC) 1661 /* 1662 * If the buffer is growing on its other-than-first allocation, 1663 * then we revert to the page-allocation scheme. 
1664 */ 1665 if (bp->b_flags & B_MALLOC) { 1666 origbuf = bp->b_data; 1667 origbufsize = bp->b_bufsize; 1668 bp->b_data = bp->b_kvabase; 1669 bufspace -= bp->b_bufsize; 1670 bufmallocspace -= bp->b_bufsize; 1671 bp->b_bufsize = 0; 1672 bp->b_flags &= ~B_MALLOC; 1673 newbsize = round_page(newbsize); 1674 } 1675#endif 1676 vm_hold_load_pages( 1677 bp, 1678 (vm_offset_t) bp->b_data + bp->b_bufsize, 1679 (vm_offset_t) bp->b_data + newbsize); 1680#if !defined(NO_B_MALLOC) 1681 if (origbuf) { 1682 bcopy(origbuf, bp->b_data, origbufsize); 1683 free(origbuf, M_BIOBUF); 1684 } 1685#endif 1686 } 1687 } else { 1688 vm_page_t m; 1689 int desiredpages; 1690 1691 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1692 desiredpages = (round_page(newbsize) >> PAGE_SHIFT); 1693 1694#if !defined(NO_B_MALLOC) 1695 if (bp->b_flags & B_MALLOC) 1696 panic("allocbuf: VMIO buffer can't be malloced"); 1697#endif 1698 1699 if (newbsize < bp->b_bufsize) { 1700 if (desiredpages < bp->b_npages) { 1701 for (i = desiredpages; i < bp->b_npages; i++) { 1702 /* 1703 * the page is not freed here -- it 1704 * is the responsibility of vnode_pager_setsize 1705 */ 1706 m = bp->b_pages[i]; 1707#if defined(DIAGNOSTIC) 1708 if (m == bogus_page) 1709 panic("allocbuf: bogus page found"); 1710#endif 1711 vm_page_sleep(m, "biodep", &m->busy); 1712 1713 bp->b_pages[i] = NULL; 1714 vm_page_unwire(m, 0); 1715 } 1716 pmap_qremove((vm_offset_t) trunc_page((vm_offset_t)bp->b_data) + 1717 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 1718 bp->b_npages = desiredpages; 1719 } 1720 } else if (newbsize > bp->b_bufsize) { 1721 vm_object_t obj; 1722 vm_offset_t tinc, toff; 1723 vm_ooffset_t off; 1724 vm_pindex_t objoff; 1725 int pageindex, curbpnpages; 1726 struct vnode *vp; 1727 int bsize; 1728 int orig_validoff = bp->b_validoff; 1729 int orig_validend = bp->b_validend; 1730 1731 vp = bp->b_vp; 1732 1733 if (vp->v_type == VBLK) 1734 bsize = DEV_BSIZE; 1735 else 1736 bsize = vp->v_mount->mnt_stat.f_iosize; 1737 1738 if (bp->b_npages < desiredpages) { 1739 obj = vp->v_object; 1740 tinc = PAGE_SIZE; 1741 if (tinc > bsize) 1742 tinc = bsize; 1743 1744 off = bp->b_offset; 1745#ifdef DIAGNOSTIC 1746 if (bp->b_offset == NOOFFSET) 1747 panic("allocbuf: no buffer offset"); 1748#endif 1749 1750 curbpnpages = bp->b_npages; 1751 doretry: 1752 bp->b_validoff = orig_validoff; 1753 bp->b_validend = orig_validend; 1754 bp->b_flags |= B_CACHE; 1755 for (toff = 0; toff < newbsize; toff += tinc) { 1756 int bytesinpage; 1757 1758 pageindex = toff >> PAGE_SHIFT; 1759 objoff = OFF_TO_IDX(off + toff); 1760 if (pageindex < curbpnpages) { 1761 1762 m = bp->b_pages[pageindex]; 1763#ifdef VFS_BIO_DIAG 1764 if (m->pindex != objoff) 1765 panic("allocbuf: page changed offset??!!!?"); 1766#endif 1767 bytesinpage = tinc; 1768 if (tinc > (newbsize - toff)) 1769 bytesinpage = newbsize - toff; 1770 if (bp->b_flags & B_CACHE) 1771 vfs_buf_set_valid(bp, off, toff, bytesinpage, m); 1772 continue; 1773 } 1774 m = vm_page_lookup(obj, objoff); 1775 if (!m) { 1776 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL); 1777 if (!m) { 1778 VM_WAIT; 1779 vm_pageout_deficit += (desiredpages - bp->b_npages); 1780 goto doretry; 1781 } 1782 1783 vm_page_wire(m); 1784 vm_page_flag_clear(m, PG_BUSY); 1785 bp->b_flags &= ~B_CACHE; 1786 1787 } else if (m->flags & PG_BUSY) { 1788 s = splvm(); 1789 if (m->flags & PG_BUSY) { 1790 vm_page_flag_set(m, PG_WANTED); 1791 tsleep(m, PVM, "pgtblk", 0); 1792 } 1793 splx(s); 1794 goto doretry; 1795 } else { 1796 if ((curproc != pageproc) && 1797 
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
							(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_flag_clear(m, PG_ZERO);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page((vm_offset_t)bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		if (bp->b_flags & B_READ)
			tsleep(bp, PRIBIO, "biord", 0);
		else
			tsleep(bp, PRIBIO, "biowr", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
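
/*
 * Illustrative sketch of the asynchronous completion path handled by
 * biodone() below (not from the original source): an initiator that
 * cannot sleep in biowait() may request a callback instead.
 * "my_iodone" is a hypothetical completion routine; biodone() clears
 * B_CALL and invokes it, and the callback is then responsible for
 * releasing the buffer.
 *
 *	bp->b_flags |= B_READ | B_ASYNC | B_CALL;
 *	bp->b_iodone = my_iodone;
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(vp, bp);
 */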
1872 */ 1873void 1874biodone(register struct buf * bp) 1875{ 1876 int s; 1877 1878 s = splbio(); 1879 1880#if !defined(MAX_PERF) 1881 if (!(bp->b_flags & B_BUSY)) 1882 panic("biodone: buffer not busy"); 1883#endif 1884 1885 if (bp->b_flags & B_DONE) { 1886 splx(s); 1887#if !defined(MAX_PERF) 1888 printf("biodone: buffer already done\n"); 1889#endif 1890 return; 1891 } 1892 bp->b_flags |= B_DONE; 1893 1894 if (bp->b_flags & B_FREEBUF) { 1895 brelse(bp); 1896 splx(s); 1897 return; 1898 } 1899 1900 if ((bp->b_flags & B_READ) == 0) { 1901 vwakeup(bp); 1902 } 1903 1904 /* call optional completion function if requested */ 1905 if (bp->b_flags & B_CALL) { 1906 bp->b_flags &= ~B_CALL; 1907 (*bp->b_iodone) (bp); 1908 splx(s); 1909 return; 1910 } 1911 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 1912 (*bioops.io_complete)(bp); 1913 1914 if (bp->b_flags & B_VMIO) { 1915 int i, resid; 1916 vm_ooffset_t foff; 1917 vm_page_t m; 1918 vm_object_t obj; 1919 int iosize; 1920 struct vnode *vp = bp->b_vp; 1921 1922 obj = vp->v_object; 1923 1924#if defined(VFS_BIO_DEBUG) 1925 if (vp->v_usecount == 0) { 1926 panic("biodone: zero vnode ref count"); 1927 } 1928 1929 if (vp->v_object == NULL) { 1930 panic("biodone: missing VM object"); 1931 } 1932 1933 if ((vp->v_flag & VOBJBUF) == 0) { 1934 panic("biodone: vnode is not setup for merged cache"); 1935 } 1936#endif 1937 1938 foff = bp->b_offset; 1939#ifdef DIAGNOSTIC 1940 if (bp->b_offset == NOOFFSET) 1941 panic("biodone: no buffer offset"); 1942#endif 1943 1944#if !defined(MAX_PERF) 1945 if (!obj) { 1946 panic("biodone: no object"); 1947 } 1948#endif 1949#if defined(VFS_BIO_DEBUG) 1950 if (obj->paging_in_progress < bp->b_npages) { 1951 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 1952 obj->paging_in_progress, bp->b_npages); 1953 } 1954#endif 1955 iosize = bp->b_bufsize; 1956 for (i = 0; i < bp->b_npages; i++) { 1957 int bogusflag = 0; 1958 m = bp->b_pages[i]; 1959 if (m == bogus_page) { 1960 bogusflag = 1; 1961 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 1962 if (!m) { 1963#if defined(VFS_BIO_DEBUG) 1964 printf("biodone: page disappeared\n"); 1965#endif 1966 vm_object_pip_subtract(obj, 1); 1967 continue; 1968 } 1969 bp->b_pages[i] = m; 1970 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 1971 } 1972#if defined(VFS_BIO_DEBUG) 1973 if (OFF_TO_IDX(foff) != m->pindex) { 1974 printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex); 1975 } 1976#endif 1977 resid = IDX_TO_OFF(m->pindex + 1) - foff; 1978 if (resid > iosize) 1979 resid = iosize; 1980 1981 /* 1982 * In the write case, the valid and clean bits are 1983 * already changed correctly, so we only need to do this 1984 * here in the read case. 1985 */ 1986 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 1987 vfs_page_set_valid(bp, foff, i, m); 1988 } 1989 vm_page_flag_clear(m, PG_ZERO); 1990 1991 /* 1992 * when debugging new filesystems or buffer I/O methods, this 1993 * is the most common error that pops up. if you see this, you 1994 * have not set the page busy flag correctly!!! 
1995 */ 1996 if (m->busy == 0) { 1997#if !defined(MAX_PERF) 1998 printf("biodone: page busy < 0, " 1999 "pindex: %d, foff: 0x(%x,%x), " 2000 "resid: %d, index: %d\n", 2001 (int) m->pindex, (int)(foff >> 32), 2002 (int) foff & 0xffffffff, resid, i); 2003#endif 2004 if (vp->v_type != VBLK) 2005#if !defined(MAX_PERF) 2006 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 2007 bp->b_vp->v_mount->mnt_stat.f_iosize, 2008 (int) bp->b_lblkno, 2009 bp->b_flags, bp->b_npages); 2010 else 2011 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 2012 (int) bp->b_lblkno, 2013 bp->b_flags, bp->b_npages); 2014 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 2015 m->valid, m->dirty, m->wire_count); 2016#endif 2017 panic("biodone: page busy < 0\n"); 2018 } 2019 vm_page_io_finish(m); 2020 vm_object_pip_subtract(obj, 1); 2021 foff += resid; 2022 iosize -= resid; 2023 } 2024 if (obj && 2025 (obj->paging_in_progress == 0) && 2026 (obj->flags & OBJ_PIPWNT)) { 2027 vm_object_clear_flag(obj, OBJ_PIPWNT); 2028 wakeup(obj); 2029 } 2030 } 2031 /* 2032 * For asynchronous completions, release the buffer now. The brelse 2033 * checks for B_WANTED and will do the wakeup there if necessary - so 2034 * no need to do a wakeup here in the async case. 2035 */ 2036 2037 if (bp->b_flags & B_ASYNC) { 2038 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 2039 brelse(bp); 2040 else 2041 bqrelse(bp); 2042 } else { 2043 bp->b_flags &= ~B_WANTED; 2044 wakeup(bp); 2045 } 2046 splx(s); 2047} 2048 2049#if 0 /* not with kirks code */ 2050static int vfs_update_interval = 30; 2051 2052static void 2053vfs_update() 2054{ 2055 while (1) { 2056 tsleep(&vfs_update_wakeup, PUSER, "update", 2057 hz * vfs_update_interval); 2058 vfs_update_wakeup = 0; 2059 sync(curproc, NULL); 2060 } 2061} 2062 2063static int 2064sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS 2065{ 2066 int error = sysctl_handle_int(oidp, 2067 oidp->oid_arg1, oidp->oid_arg2, req); 2068 if (!error) 2069 wakeup(&vfs_update_wakeup); 2070 return error; 2071} 2072 2073SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW, 2074 &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", ""); 2075 2076#endif 2077 2078 2079/* 2080 * This routine is called in lieu of iodone in the case of 2081 * incomplete I/O. This keeps the busy status for pages 2082 * consistant. 2083 */ 2084void 2085vfs_unbusy_pages(struct buf * bp) 2086{ 2087 int i; 2088 2089 if (bp->b_flags & B_VMIO) { 2090 struct vnode *vp = bp->b_vp; 2091 vm_object_t obj = vp->v_object; 2092 2093 for (i = 0; i < bp->b_npages; i++) { 2094 vm_page_t m = bp->b_pages[i]; 2095 2096 if (m == bogus_page) { 2097 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 2098#if !defined(MAX_PERF) 2099 if (!m) { 2100 panic("vfs_unbusy_pages: page missing\n"); 2101 } 2102#endif 2103 bp->b_pages[i] = m; 2104 pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages); 2105 } 2106 vm_object_pip_subtract(obj, 1); 2107 vm_page_flag_clear(m, PG_ZERO); 2108 vm_page_io_finish(m); 2109 } 2110 if (obj->paging_in_progress == 0 && 2111 (obj->flags & OBJ_PIPWNT)) { 2112 vm_object_clear_flag(obj, OBJ_PIPWNT); 2113 wakeup(obj); 2114 } 2115 } 2116} 2117 2118/* 2119 * Set NFS' b_validoff and b_validend fields from the valid bits 2120 * of a page. If the consumer is not NFS, and the page is not 2121 * valid for the entire range, clear the B_CACHE flag to force 2122 * the consumer to re-read the page. 
2123 */ 2124static void 2125vfs_buf_set_valid(struct buf *bp, 2126 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 2127 vm_page_t m) 2128{ 2129 if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) { 2130 vm_offset_t svalid, evalid; 2131 int validbits = m->valid; 2132 2133 /* 2134 * This only bothers with the first valid range in the 2135 * page. 2136 */ 2137 svalid = off; 2138 while (validbits && !(validbits & 1)) { 2139 svalid += DEV_BSIZE; 2140 validbits >>= 1; 2141 } 2142 evalid = svalid; 2143 while (validbits & 1) { 2144 evalid += DEV_BSIZE; 2145 validbits >>= 1; 2146 } 2147 /* 2148 * Make sure this range is contiguous with the range 2149 * built up from previous pages. If not, then we will 2150 * just use the range from the previous pages. 2151 */ 2152 if (svalid == bp->b_validend) { 2153 bp->b_validoff = min(bp->b_validoff, svalid); 2154 bp->b_validend = max(bp->b_validend, evalid); 2155 } 2156 } else if (!vm_page_is_valid(m, 2157 (vm_offset_t) ((foff + off) & PAGE_MASK), 2158 size)) { 2159 bp->b_flags &= ~B_CACHE; 2160 } 2161} 2162 2163/* 2164 * Set the valid bits in a page, taking care of the b_validoff, 2165 * b_validend fields which NFS uses to optimise small reads. Off is 2166 * the offset within the file and pageno is the page index within the buf. 2167 */ 2168static void 2169vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 2170{ 2171 struct vnode *vp = bp->b_vp; 2172 vm_ooffset_t soff, eoff; 2173 2174 soff = off; 2175 eoff = off + min(PAGE_SIZE, bp->b_bufsize); 2176 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) { 2177 vm_ooffset_t sv, ev; 2178 vm_page_set_invalid(m, 2179 (vm_offset_t) (soff & PAGE_MASK), 2180 (vm_offset_t) (eoff - soff)); 2181 off = off - pageno * PAGE_SIZE; 2182 sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1)); 2183 ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1)); 2184 soff = qmax(sv, soff); 2185 eoff = qmin(ev, eoff); 2186 } 2187 if (eoff > soff) 2188 vm_page_set_validclean(m, 2189 (vm_offset_t) (soff & PAGE_MASK), 2190 (vm_offset_t) (eoff - soff)); 2191} 2192 2193/* 2194 * This routine is called before a device strategy routine. 2195 * It is used to tell the VM system that paging I/O is in 2196 * progress, and treat the pages associated with the buffer 2197 * almost as being PG_BUSY. Also the object paging_in_progress 2198 * flag is handled to make sure that the object doesn't become 2199 * inconsistant. 
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_busy_pages: no buffer offset");
#endif

		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (vm_page_sleep(m, "vbpage", NULL))
				goto retry;
		}

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vm_page_flag_clear(m, PG_ZERO);
			if ((bp->b_flags & B_CLUSTER) == 0) {
				vm_object_pip_add(obj, 1);
				vm_page_io_start(m);
			}

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
static void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_clean_pages: no buffer offset");
#endif

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

/*
 * Clear out a buffer's contents, skipping any DEV_BSIZE-sized piece
 * that is already accounted for as valid or already zero (PG_ZERO),
 * then mark the underlying pages fully valid.  Non-VMIO buffers are
 * simply handed to clrbuf().
 */
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
			    (bp->b_pages[0]->valid != mask)) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT),
					    PAGE_SIZE);
				}
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data +
						    (i << PAGE_SHIFT) +
						    j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
			vm_page_flag_clear(bp->b_pages[i], PG_ZERO);
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
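 *
 * A minimal usage sketch (newbsize is hypothetical here): to grow the
 * data area of a non-VMIO buffer, a caller such as allocbuf() would
 * use
 *
 *	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data + bp->b_bufsize,
 *	    (vm_offset_t)bp->b_data + newbsize);
 *
 * and the corresponding vm_hold_free_pages() call to shrink it again.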
 */
static void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(p, PG_ZERO);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		vm_page_wakeup(p);
	}
	bp->b_npages = index;
}

static void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		/* check the bound before reading b_pages[index] */
		if (index >= bp->b_npages)
			break;
		p = bp->b_pages[index];
		if (p != NULL) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    (int) bp->b_blkno, (int) bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_busy(p);
			vm_page_unwire(p, 0);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    (u_int)bp->b_flags, PRINT_BUF_FLAGS);
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;

		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;

			m = bp->b_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
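
/*
 * Usage sketch for the DDB command above (the address is hypothetical):
 * at the debugger prompt,
 *
 *	db> show buffer 0xc02f5e34
 *
 * prints b_flags decoded through PRINT_BUF_FLAGS, the error, size and
 * block fields, and an (object, pindex, physical address) triple for
 * each of the buffer's pages.
 */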