/* vfs_bio.c revision 6539 */
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.27 1995/02/03 03:35:56 davidg Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>	/* NOTE(review): duplicate of the include above; harmless */

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;
/* counts of buffers currently on the VMIO and LRU free queues */
int nvmio, nlru;

extern vm_map_t buffer_map, io_map, kernel_map, pager_map;

/* forward declarations for helpers defined later in this file */
void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_dirty_pages(struct buf * bp);
void vfs_busy_pages(struct buf *, int clear_modify);

/* set while a process sleeps in getnewbuf() waiting for a free buffer */
int needsbuffer;

/*
 * Internal update daemon, process 3
 * The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

/* current and maximum bytes of buffer KVA backed by pages */
int bufspace, maxbufspace;

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* one contiguous pageable KVA range, MAXBSIZE per buffer header */
	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * this will change later!!!
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	/* the bogus page stands in for valid-but-unread pages during I/O */
	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object, bogus_offset - VM_MIN_KERNEL_ADDRESS, 0);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_LRU)
			--nlru;	/* keep the LRU population count in sync */
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
155 */ 156int 157bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred, 158 struct buf ** bpp) 159{ 160 struct buf *bp; 161 162 bp = getblk(vp, blkno, size, 0, 0); 163 *bpp = bp; 164 165 /* if not found in cache, do some I/O */ 166 if ((bp->b_flags & B_CACHE) == 0) { 167 if (curproc && curproc->p_stats) /* count block I/O */ 168 curproc->p_stats->p_ru.ru_inblock++; 169 bp->b_flags |= B_READ; 170 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 171 if (bp->b_rcred == NOCRED) { 172 if (cred != NOCRED) 173 crhold(cred); 174 bp->b_rcred = cred; 175 } 176 vfs_busy_pages(bp, 0); 177 VOP_STRATEGY(bp); 178 return (biowait(bp)); 179 } else if (bp->b_lblkno == bp->b_blkno) { 180 VOP_BMAP(vp, bp->b_lblkno, (struct vnode **) 0, 181 &bp->b_blkno, (int *) 0); 182 } 183 return (0); 184} 185 186/* 187 * Operates like bread, but also starts asynchronous I/O on 188 * read-ahead blocks. 189 */ 190int 191breadn(struct vnode * vp, daddr_t blkno, int size, 192 daddr_t * rablkno, int *rabsize, 193 int cnt, struct ucred * cred, struct buf ** bpp) 194{ 195 struct buf *bp, *rabp; 196 int i; 197 int rv = 0, readwait = 0; 198 199 *bpp = bp = getblk(vp, blkno, size, 0, 0); 200 201 /* if not found in cache, do some I/O */ 202 if ((bp->b_flags & B_CACHE) == 0) { 203 if (curproc && curproc->p_stats) /* count block I/O */ 204 curproc->p_stats->p_ru.ru_inblock++; 205 bp->b_flags |= B_READ; 206 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 207 if (bp->b_rcred == NOCRED) { 208 if (cred != NOCRED) 209 crhold(cred); 210 bp->b_rcred = cred; 211 } 212 vfs_busy_pages(bp, 0); 213 VOP_STRATEGY(bp); 214 ++readwait; 215 } else if (bp->b_lblkno == bp->b_blkno) { 216 VOP_BMAP(vp, bp->b_lblkno, (struct vnode **) 0, 217 &bp->b_blkno, (int *) 0); 218 } 219 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 220 if (inmem(vp, *rablkno)) 221 continue; 222 rabp = getblk(vp, *rablkno, *rabsize, 0, 0); 223 224 if ((rabp->b_flags & B_CACHE) == 0) { 225 if (curproc && curproc->p_stats) 226 
curproc->p_stats->p_ru.ru_inblock++; 227 rabp->b_flags |= B_READ | B_ASYNC; 228 rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 229 if (rabp->b_rcred == NOCRED) { 230 if (cred != NOCRED) 231 crhold(cred); 232 rabp->b_rcred = cred; 233 } 234 vfs_busy_pages(rabp, 0); 235 VOP_STRATEGY(rabp); 236 } else { 237 brelse(rabp); 238 } 239 } 240 241 if (readwait) { 242 rv = biowait(bp); 243 } 244 return (rv); 245} 246 247/* 248 * Write, release buffer on completion. (Done by iodone 249 * if async.) 250 */ 251int 252bwrite(struct buf * bp) 253{ 254 int oldflags = bp->b_flags; 255 256 if (bp->b_flags & B_INVAL) { 257 brelse(bp); 258 return (0); 259 } 260 if (!(bp->b_flags & B_BUSY)) 261 panic("bwrite: buffer is not busy???"); 262 263 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI); 264 bp->b_flags |= B_WRITEINPROG; 265 266 if (oldflags & B_ASYNC) { 267 if (oldflags & B_DELWRI) { 268 reassignbuf(bp, bp->b_vp); 269 } else if (curproc) { 270 ++curproc->p_stats->p_ru.ru_oublock; 271 } 272 } 273 bp->b_vp->v_numoutput++; 274 vfs_busy_pages(bp, 1); 275 VOP_STRATEGY(bp); 276 277 if ((oldflags & B_ASYNC) == 0) { 278 int rtval = biowait(bp); 279 280 if (oldflags & B_DELWRI) { 281 reassignbuf(bp, bp->b_vp); 282 } else if (curproc) { 283 ++curproc->p_stats->p_ru.ru_oublock; 284 } 285 brelse(bp); 286 return (rtval); 287 } 288 return (0); 289} 290 291int 292vn_bwrite(ap) 293 struct vop_bwrite_args *ap; 294{ 295 return (bwrite(ap->a_bp)); 296} 297 298/* 299 * Delayed write. (Buffer is marked dirty). 
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	/* sequential devices cannot reorder -- write through immediately */
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~B_READ;
	vfs_dirty_pages(bp);
	if ((bp->b_flags & B_DELWRI) == 0) {
		if (curproc)
			++curproc->p_stats->p_ru.ru_oublock;
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
#ifdef EVILFORNOW
	/*
	 * #ifdef EXTRA_DEADLOCKS is appropriate for this code for now :-)
	 */
	if (((bp->b_flags & B_DELWRI) == 0) && (bp->b_vp->v_numoutput > 24)) {
		int s = splbio();

		while (bp->b_vp->v_numoutput > 16) {
			bp->b_vp->v_flag |= VBWAIT;
			tsleep((caddr_t) &bp->b_vp->v_numoutput, PRIBIO, "bawnmo", 0);
		}
		splx(s);
	}
#endif
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * Release a buffer.
 * Updates the state of the backing VM pages, then requeues the buffer
 * on the appropriate free list and clears the busy/wanted bits.
 */
void
brelse(struct buf * bp)
{
	int s;

	/* cluster buffers are pseudo-buffers; they have their own release */
	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}
	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}
	/*
	 * VMIO buffers: propagate the buffer's state into the backing
	 * VM pages and release our per-page bmapped references.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				/* bogus pages must have been replaced in biodone() */
				panic("brelse: bogus page found");
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
					vm_page_set_invalid(m, foff, resid);
				} else if ((bp->b_flags & B_DELWRI) == 0) {
					vm_page_set_clean(m, foff, resid);
					vm_page_set_valid(m, foff, resid);
				}
			} else {
				vm_page_test_dirty(m);
			}
			if (bp->b_flags & B_INVAL) {
				if (m->bmapped == 0) {
					panic("brelse: bmapped is zero for page\n");
				}
				--m->bmapped;
				if (m->bmapped == 0) {
					/* last buffer reference: free, cache, or activate */
					PAGE_WAKEUP(m);
					if (m->valid == 0) {
						pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
						vm_page_free(m);
					} else if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
						vm_page_cache(m);
					else if ((m->flags & PG_ACTIVE) == 0)
						vm_page_activate(m);
				}
			}
			foff += resid;
			iototal -= resid;
		}

		if (bp->b_flags & B_INVAL) {
			/* tear down the buffer's KVA mapping entirely */
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
			--nvmio;
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		/* buffers with valid and quite potentially reuseable contents */
	} else {
		if (bp->b_flags & B_VMIO)
			bp->b_qindex = QUEUE_VMIO;
		else {
			bp->b_qindex = QUEUE_LRU;
			++nlru;
		}
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE);
	splx(s);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...
507 */ 508void 509vfs_bio_awrite(struct buf * bp) 510{ 511 int i; 512 daddr_t lblkno = bp->b_lblkno; 513 struct vnode *vp = bp->b_vp; 514 int s; 515 int ncl; 516 struct buf *bpa; 517 518 s = splbio(); 519 if( vp->v_mount && (vp->v_flag & VVMIO) && 520 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 521 int size = vp->v_mount->mnt_stat.f_iosize; 522 523 for (i = 1; i < MAXPHYS / size; i++) { 524 if ((bpa = incore(vp, lblkno + i)) && 525 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_BUSY | B_CLUSTEROK | B_INVAL)) == B_DELWRI | B_CLUSTEROK) && 526 (bpa->b_bufsize == size)) { 527 if ((bpa->b_blkno == bpa->b_lblkno) || 528 (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE)) 529 break; 530 } else { 531 break; 532 } 533 } 534 ncl = i; 535 /* 536 * this is a possible cluster write 537 */ 538 if (ncl != 1) { 539 cluster_wbuild(vp, NULL, size, lblkno, ncl, -1); 540 splx(s); 541 return; 542 } 543 } 544 /* 545 * default (old) behavior, writing out only one block 546 */ 547 bremfree(bp); 548 bp->b_flags |= B_BUSY | B_ASYNC; 549 bwrite(bp); 550 splx(s); 551} 552 553 554/* 555 * Find a buffer header which is available for use. 556 */ 557struct buf * 558getnewbuf(int slpflag, int slptimeo, int doingvmio) 559{ 560 struct buf *bp; 561 int s; 562 int firstbp = 1; 563 564 s = splbio(); 565start: 566 if (bufspace >= maxbufspace) 567 goto trytofreespace; 568 569 /* can we constitute a new buffer? 
*/ 570 if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) { 571 if (bp->b_qindex != QUEUE_EMPTY) 572 panic("getnewbuf: inconsistent EMPTY queue"); 573 bremfree(bp); 574 goto fillbuf; 575 } 576trytofreespace: 577 /* 578 * we keep the file I/O from hogging metadata I/O 579 */ 580 if (bp = bufqueues[QUEUE_AGE].tqh_first) { 581 if (bp->b_qindex != QUEUE_AGE) 582 panic("getnewbuf: inconsistent AGE queue"); 583 } else if ((nvmio > (nbuf / 2)) 584 && (bp = bufqueues[QUEUE_VMIO].tqh_first)) { 585 if (bp->b_qindex != QUEUE_VMIO) 586 panic("getnewbuf: inconsistent VMIO queue"); 587 } else if ((!doingvmio || (nlru > (nbuf / 2))) && 588 (bp = bufqueues[QUEUE_LRU].tqh_first)) { 589 if (bp->b_qindex != QUEUE_LRU) 590 panic("getnewbuf: inconsistent LRU queue"); 591 } 592 if (!bp) { 593 if (doingvmio) { 594 if (bp = bufqueues[QUEUE_VMIO].tqh_first) { 595 if (bp->b_qindex != QUEUE_VMIO) 596 panic("getnewbuf: inconsistent VMIO queue"); 597 } else if (bp = bufqueues[QUEUE_LRU].tqh_first) { 598 if (bp->b_qindex != QUEUE_LRU) 599 panic("getnewbuf: inconsistent LRU queue"); 600 } 601 } else { 602 if (bp = bufqueues[QUEUE_LRU].tqh_first) { 603 if (bp->b_qindex != QUEUE_LRU) 604 panic("getnewbuf: inconsistent LRU queue"); 605 } else if (bp = bufqueues[QUEUE_VMIO].tqh_first) { 606 if (bp->b_qindex != QUEUE_VMIO) 607 panic("getnewbuf: inconsistent VMIO queue"); 608 } 609 } 610 } 611 if (!bp) { 612 /* wait for a free buffer of any kind */ 613 needsbuffer = 1; 614 tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo); 615 splx(s); 616 return (0); 617 } 618 /* if we are a delayed write, convert to an async write */ 619 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 620 vfs_bio_awrite(bp); 621 if (!slpflag && !slptimeo) { 622 splx(s); 623 return (0); 624 } 625 goto start; 626 } 627 bremfree(bp); 628 629 if (bp->b_flags & B_VMIO) { 630 bp->b_flags |= B_INVAL | B_BUSY; 631 brelse(bp); 632 bremfree(bp); 633 } 634 if (bp->b_vp) 635 brelvp(bp); 636 637 /* we are not free, nor do we 
contain interesting data */ 638 if (bp->b_rcred != NOCRED) 639 crfree(bp->b_rcred); 640 if (bp->b_wcred != NOCRED) 641 crfree(bp->b_wcred); 642fillbuf: 643 bp->b_flags |= B_BUSY; 644 LIST_REMOVE(bp, b_hash); 645 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 646 splx(s); 647 if (bp->b_bufsize) { 648 allocbuf(bp, 0, 0); 649 } 650 bp->b_flags = B_BUSY; 651 bp->b_dev = NODEV; 652 bp->b_vp = NULL; 653 bp->b_blkno = bp->b_lblkno = 0; 654 bp->b_iodone = 0; 655 bp->b_error = 0; 656 bp->b_resid = 0; 657 bp->b_bcount = 0; 658 bp->b_npages = 0; 659 bp->b_wcred = bp->b_rcred = NOCRED; 660 bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE; 661 bp->b_dirtyoff = bp->b_dirtyend = 0; 662 bp->b_validoff = bp->b_validend = 0; 663 if (bufspace >= maxbufspace) { 664 s = splbio(); 665 bp->b_flags |= B_INVAL; 666 brelse(bp); 667 goto trytofreespace; 668 } 669 return (bp); 670} 671 672/* 673 * Check to see if a block is currently memory resident. 674 */ 675struct buf * 676incore(struct vnode * vp, daddr_t blkno) 677{ 678 struct buf *bp; 679 struct bufhashhdr *bh; 680 681 int s = splbio(); 682 683 bh = BUFHASH(vp, blkno); 684 bp = bh->lh_first; 685 686 /* Search hash chain */ 687 while (bp) { 688 /* hit */ 689 if (bp->b_lblkno == blkno && bp->b_vp == vp 690 && (bp->b_flags & B_INVAL) == 0) { 691 splx(s); 692 return (bp); 693 } 694 bp = bp->b_hash.le_next; 695 } 696 splx(s); 697 698 return (0); 699} 700 701/* 702 * returns true if no I/O is needed to access the 703 * associated VM object. 
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == 0)
		return 0;
	/* only VMIO-backed vnodes can satisfy the block from VM pages */
	if ((vp->v_vmdata == 0) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = (vm_object_t) vp->v_vmdata;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	/* every piece of the block must be resident and valid */
	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		int mask;	/* XXX(review): unused local */

		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * Get a block given a specified block and offset into a file/device.
 * Returns a busy buffer, or NULL if the sleep was interrupted
 * (slpflag/slptimeo semantics as for tsleep).
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	vm_offset_t off;
	int nleft;

	s = splbio();
loop:
	/* nudge the pageout daemon when free memory is getting scarce */
	if ((cnt.v_free_count + cnt.v_cache_count) <
	    cnt.v_free_reserved + MAXBSIZE / PAGE_SIZE)
		wakeup((caddr_t) &vm_pages_needed);
	if (bp = incore(vp, blkno)) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (curproc == pageproc) {
				/* let brelse() know the pagedaemon is waiting */
				bp->b_flags |= B_PDWANTED;
				wakeup((caddr_t) &cnt.v_free_count);
			}
			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;
			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		/*
		 * check for size inconsistancies
		 */
		if (bp->b_bcount != size) {
#if defined(VFS_BIO_DEBUG)
			printf("getblk: invalid buffer size: %ld\n", bp->b_bcount);
#endif
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = (vm_object_t) vp->v_vmdata) &&
		    (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo)
				return NULL;
			goto loop;
		}
		/* someone else may have instantiated the block while we slept */
		if (incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);
		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
			++nvmio;
		} else {
			if (bp->b_flags & B_VMIO)
				--nvmio;
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);
		if (!allocbuf(bp, size, 1)) {
			s = splbio();
			goto loop;
		}
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0);
	allocbuf(bp, size, 0);
	bp->b_flags |= B_INVAL;
	return (bp);
}

/*
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
844 */ 845int 846allocbuf(struct buf * bp, int size, int vmio) 847{ 848 849 int s; 850 int newbsize, mbsize; 851 int i; 852 853 if ((bp->b_flags & B_VMIO) == 0) { 854 mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE; 855 newbsize = round_page(size); 856 857 if (newbsize == bp->b_bufsize) { 858 bp->b_bcount = size; 859 return 1; 860 } else if (newbsize < bp->b_bufsize) { 861 vm_hold_free_pages( 862 bp, 863 (vm_offset_t) bp->b_data + newbsize, 864 (vm_offset_t) bp->b_data + bp->b_bufsize); 865 bufspace -= (bp->b_bufsize - newbsize); 866 } else if (newbsize > bp->b_bufsize) { 867 vm_hold_load_pages( 868 bp, 869 (vm_offset_t) bp->b_data + bp->b_bufsize, 870 (vm_offset_t) bp->b_data + newbsize); 871 bufspace += (newbsize - bp->b_bufsize); 872 } 873 /* 874 * adjust buffer cache's idea of memory allocated to buffer 875 * contents 876 */ 877 } else { 878 vm_page_t m; 879 int desiredpages; 880 881 newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE; 882 desiredpages = round_page(newbsize) / PAGE_SIZE; 883 884 if (newbsize == bp->b_bufsize) { 885 bp->b_bcount = size; 886 return 1; 887 } else if (newbsize < bp->b_bufsize) { 888 if (desiredpages < bp->b_npages) { 889 pmap_qremove((vm_offset_t) trunc_page(bp->b_data) + 890 desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages)); 891 for (i = desiredpages; i < bp->b_npages; i++) { 892 m = bp->b_pages[i]; 893 s = splhigh(); 894 while ((m->flags & PG_BUSY) || (m->busy != 0)) { 895 m->flags |= PG_WANTED; 896 tsleep(m, PVM, "biodep", 0); 897 } 898 splx(s); 899 900 if (m->bmapped == 0) { 901 printf("allocbuf: bmapped is zero for page %d\n", i); 902 panic("allocbuf: error"); 903 } 904 --m->bmapped; 905 if (m->bmapped == 0) { 906 PAGE_WAKEUP(m); 907 if (m->valid == 0) { 908 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE); 909 vm_page_free(m); 910 } 911 } 912 bp->b_pages[i] = NULL; 913 } 914 bp->b_npages = desiredpages; 915 bufspace -= (bp->b_bufsize - newbsize); 916 } 917 } else { 918 vm_object_t obj; 919 
vm_offset_t tinc, off, toff, objoff; 920 int pageindex, curbpnpages; 921 struct vnode *vp; 922 int bsize; 923 924 vp = bp->b_vp; 925 bsize = vp->v_mount->mnt_stat.f_iosize; 926 927 if (bp->b_npages < desiredpages) { 928 obj = (vm_object_t) vp->v_vmdata; 929 tinc = PAGE_SIZE; 930 if (tinc > bsize) 931 tinc = bsize; 932 off = bp->b_lblkno * bsize; 933 curbpnpages = bp->b_npages; 934 doretry: 935 for (toff = 0; toff < newbsize; toff += tinc) { 936 int mask; 937 int bytesinpage; 938 939 pageindex = toff / PAGE_SIZE; 940 objoff = trunc_page(toff + off); 941 if (pageindex < curbpnpages) { 942 int pb; 943 944 m = bp->b_pages[pageindex]; 945 if (m->offset != objoff) 946 panic("allocbuf: page changed offset??!!!?"); 947 bytesinpage = tinc; 948 if (tinc > (newbsize - toff)) 949 bytesinpage = newbsize - toff; 950 if (!vm_page_is_valid(m, toff + off, bytesinpage)) { 951 bp->b_flags &= ~B_CACHE; 952 } 953 if ((m->flags & PG_ACTIVE) == 0) 954 vm_page_activate(m); 955 continue; 956 } 957 m = vm_page_lookup(obj, objoff); 958 if (!m) { 959 m = vm_page_alloc(obj, objoff, 0); 960 if (!m) { 961 int j; 962 963 for (j = bp->b_npages; j < pageindex; j++) { 964 vm_page_t mt = bp->b_pages[j]; 965 966 PAGE_WAKEUP(mt); 967 if (mt->valid == 0 && mt->bmapped == 0) { 968 vm_page_free(mt); 969 } 970 } 971 VM_WAIT; 972 if (vmio && (bp->b_flags & B_PDWANTED)) { 973 bp->b_flags |= B_INVAL; 974 brelse(bp); 975 return 0; 976 } 977 curbpnpages = bp->b_npages; 978 goto doretry; 979 } 980 m->valid = 0; 981 vm_page_activate(m); 982 } else if ((m->valid == 0) || (m->flags & PG_BUSY)) { 983 int j; 984 int bufferdestroyed = 0; 985 986 for (j = bp->b_npages; j < pageindex; j++) { 987 vm_page_t mt = bp->b_pages[j]; 988 989 PAGE_WAKEUP(mt); 990 if (mt->valid == 0 && mt->bmapped == 0) { 991 vm_page_free(mt); 992 } 993 } 994 if (vmio && (bp->b_flags & B_PDWANTED)) { 995 bp->b_flags |= B_INVAL; 996 brelse(bp); 997 VM_WAIT; 998 bufferdestroyed = 1; 999 } 1000 s = splbio(); 1001 if (m->flags & PG_BUSY) { 1002 
m->flags |= PG_WANTED; 1003 tsleep(m, PRIBIO, "pgtblk", 0); 1004 } else if( m->valid == 0 && m->bmapped == 0) { 1005 vm_page_free(m); 1006 } 1007 splx(s); 1008 if (bufferdestroyed) 1009 return 0; 1010 curbpnpages = bp->b_npages; 1011 goto doretry; 1012 } else { 1013 int pb; 1014 1015 if ((m->flags & PG_CACHE) && 1016 (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) { 1017 int j; 1018 1019 for (j = bp->b_npages; j < pageindex; j++) { 1020 vm_page_t mt = bp->b_pages[j]; 1021 1022 PAGE_WAKEUP(mt); 1023 if (mt->valid == 0 && mt->bmapped == 0) { 1024 vm_page_free(mt); 1025 } 1026 } 1027 VM_WAIT; 1028 if (vmio && (bp->b_flags & B_PDWANTED)) { 1029 bp->b_flags |= B_INVAL; 1030 brelse(bp); 1031 return 0; 1032 } 1033 curbpnpages = bp->b_npages; 1034 goto doretry; 1035 } 1036 bytesinpage = tinc; 1037 if (tinc > (newbsize - toff)) 1038 bytesinpage = newbsize - toff; 1039 if (!vm_page_is_valid(m, toff + off, bytesinpage)) { 1040 bp->b_flags &= ~B_CACHE; 1041 } 1042 if ((m->flags & PG_ACTIVE) == 0) 1043 vm_page_activate(m); 1044 m->flags |= PG_BUSY; 1045 } 1046 bp->b_pages[pageindex] = m; 1047 curbpnpages = pageindex + 1; 1048 } 1049 if (bsize >= PAGE_SIZE) { 1050 for (i = bp->b_npages; i < curbpnpages; i++) { 1051 m = bp->b_pages[i]; 1052 if (m->valid == 0) { 1053 bp->b_flags &= ~B_CACHE; 1054 } 1055 m->bmapped++; 1056 PAGE_WAKEUP(m); 1057 } 1058#if 0 1059 if( bp->b_flags & B_CACHE) { 1060 for (i = bp->b_npages; i < curbpnpages; i++) { 1061 bp->b_pages[i]->flags |= PG_REFERENCED; 1062 } 1063 } 1064#endif 1065 } else { 1066 if (!vm_page_is_valid(bp->b_pages[0], off, bsize)) 1067 bp->b_flags &= ~B_CACHE; 1068 bp->b_pages[0]->bmapped++; 1069 PAGE_WAKEUP(bp->b_pages[0]); 1070 } 1071 bp->b_npages = curbpnpages; 1072 bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE; 1073 pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages); 1074 bp->b_data += off % PAGE_SIZE; 1075 } 1076 bufspace += (newbsize - bp->b_bufsize); 1077 } 1078 } 1079 bp->b_bufsize = newbsize; 
1080 bp->b_bcount = size; 1081 return 1; 1082} 1083 1084/* 1085 * Wait for buffer I/O completion, returning error status. 1086 */ 1087int 1088biowait(register struct buf * bp) 1089{ 1090 int s; 1091 1092 s = splbio(); 1093 while ((bp->b_flags & B_DONE) == 0) 1094 tsleep((caddr_t) bp, PRIBIO, "biowait", 0); 1095 if ((bp->b_flags & B_ERROR) || bp->b_error) { 1096 if ((bp->b_flags & B_INVAL) == 0) { 1097 bp->b_flags |= B_INVAL; 1098 bp->b_dev = NODEV; 1099 LIST_REMOVE(bp, b_hash); 1100 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1101 wakeup((caddr_t) bp); 1102 } 1103 if (!bp->b_error) 1104 bp->b_error = EIO; 1105 else 1106 bp->b_flags |= B_ERROR; 1107 splx(s); 1108 return (bp->b_error); 1109 } else { 1110 splx(s); 1111 return (0); 1112 } 1113} 1114 1115/* 1116 * Finish I/O on a buffer, calling an optional function. 1117 * This is usually called from interrupt level, so process blocking 1118 * is not *a good idea*. 1119 */ 1120void 1121biodone(register struct buf * bp) 1122{ 1123 int s; 1124 1125 s = splbio(); 1126 if (bp->b_flags & B_DONE) 1127 printf("biodone: buffer already done\n"); 1128 bp->b_flags |= B_DONE; 1129 1130 if ((bp->b_flags & B_READ) == 0) { 1131 vwakeup(bp); 1132 } 1133#ifdef BOUNCE_BUFFERS 1134 if (bp->b_flags & B_BOUNCE) 1135 vm_bounce_free(bp); 1136#endif 1137 1138 /* call optional completion function if requested */ 1139 if (bp->b_flags & B_CALL) { 1140 bp->b_flags &= ~B_CALL; 1141 (*bp->b_iodone) (bp); 1142 splx(s); 1143 return; 1144 } 1145 if (bp->b_flags & B_VMIO) { 1146 int i, resid; 1147 vm_offset_t foff; 1148 vm_page_t m; 1149 vm_object_t obj; 1150 int iosize; 1151 struct vnode *vp = bp->b_vp; 1152 1153 foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1154 obj = (vm_object_t) vp->v_vmdata; 1155 if (!obj) { 1156 return; 1157 } 1158#if defined(VFS_BIO_DEBUG) 1159 if (obj->paging_in_progress < bp->b_npages) { 1160 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 1161 obj->paging_in_progress, bp->b_npages); 1162 } 1163#endif 
1164 iosize = bp->b_bufsize; 1165 for (i = 0; i < bp->b_npages; i++) { 1166 m = bp->b_pages[i]; 1167 if (m == bogus_page) { 1168 m = vm_page_lookup(obj, foff); 1169 if (!m) { 1170#if defined(VFS_BIO_DEBUG) 1171 printf("biodone: page disappeared\n"); 1172#endif 1173 --obj->paging_in_progress; 1174 continue; 1175 } 1176 bp->b_pages[i] = m; 1177 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1178 } 1179#if defined(VFS_BIO_DEBUG) 1180 if (trunc_page(foff) != m->offset) { 1181 printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset); 1182 } 1183#endif 1184 resid = (m->offset + PAGE_SIZE) - foff; 1185 if (resid > iosize) 1186 resid = iosize; 1187 if (resid > 0) { 1188 vm_page_set_valid(m, foff, resid); 1189 vm_page_set_clean(m, foff, resid); 1190 } 1191 if (m->busy == 0) { 1192 printf("biodone: page busy < 0, off: %d, foff: %d, resid: %d, index: %d\n", 1193 m->offset, foff, resid, i); 1194 printf(" iosize: %d, lblkno: %d\n", 1195 bp->b_vp->v_mount->mnt_stat.f_iosize, bp->b_lblkno); 1196 printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n", 1197 m->valid, m->dirty, m->bmapped); 1198 panic("biodone: page busy < 0\n"); 1199 } 1200 --m->busy; 1201 PAGE_WAKEUP(m); 1202 --obj->paging_in_progress; 1203 foff += resid; 1204 iosize -= resid; 1205 } 1206 if (obj && obj->paging_in_progress == 0) 1207 wakeup((caddr_t) obj); 1208 } 1209 /* 1210 * For asynchronous completions, release the buffer now. The brelse 1211 * checks for B_WANTED and will do the wakeup there if necessary - so 1212 * no need to do a wakeup here in the async case. 
	 */

	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	splx(s);
}

/*
 * Count the buffers currently sitting on the LOCKED queue.
 */
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

/* seconds between periodic syncs performed by the update daemon */
int vfs_update_interval = 30;

/*
 * Main loop of the update daemon: periodically (or when kicked via
 * vfs_update_wakeup) sync the filesystems.
 */
void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * Undo the page busy/paging_in_progress accounting done by
 * vfs_busy_pages(), replacing any bogus placeholder pages.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = (vm_object_t) vp->v_vmdata;
		vm_offset_t foff;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				/*
				 * XXX(review): foff is never advanced in this
				 * loop, so the lookup always uses the first
				 * page's offset -- confirm against later
				 * revisions of this routine.
				 */
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			PAGE_WAKEUP(m);
		}
		if (obj->paging_in_progress == 0)
			wakeup((caddr_t) obj);
	}
}

/*
 * Mark the pages underlying a VMIO buffer busy for I/O, bumping the
 * object's paging_in_progress count.  For writes (clear_modify set)
 * the pages are write-protected; for reads, valid-but-uncached pages
 * are replaced by the bogus page so the I/O cannot clobber them.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = (vm_object_t) bp->b_vp->v_vmdata;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			obj->paging_in_progress++;
			m->busy++;
			if (clear_modify) {
				vm_page_test_dirty(m);
				pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Mark the pages underlying a VMIO buffer valid and dirty for the
 * range covered by the buffer (delayed-write support).
 */
void
vfs_dirty_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_valid(m, foff, resid);
				vm_page_set_dirty(m, foff, resid);
			}
			PAGE_WAKEUP(m);
			foff += resid;
			iocount -= resid;
		}
	}
}
/*
 * these routines are not in the correct place (yet)
 * also they work *ONLY* for kernel_pmap!!!
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

tryagain0:
	/* don't let a non-pagedaemon caller exhaust the free page reserve */
	if ((curproc != pageproc) && ((cnt.v_free_count + cnt.v_cache_count) <=
	    cnt.v_free_reserved + (toa - froma) / PAGE_SIZE)) {
		VM_WAIT;
		goto tryagain0;
	}
	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS, 0);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

/*
 * Release the wired kernel pages backing [froma, toa) of a malloc'ed
 * (non-VMIO) buffer; inverse of vm_hold_load_pages().
 */
void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}

/* placeholder -- no buffer statistics reporting yet */
void
bufstats()
{
}