vfs_bio.c revision 5918
1/* 2 * Copyright (c) 1994 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 3. Absolutely no warranty of function or purpose is made by the author 15 * John S. Dyson. 16 * 4. This work was done expressly for inclusion into FreeBSD. Other use 17 * is allowed if this notation is included. 18 * 5. Modifications may be freely made to this file if the above conditions 19 * are met. 20 * 21 * $Id: vfs_bio.c,v 1.25 1995/01/24 10:00:43 davidg Exp $ 22 */ 23 24/* 25 * this file contains a new buffer I/O scheme implementing a coherent 26 * VM object and buffer cache scheme. Pains have been taken to make 27 * sure that the performance degradation associated with schemes such 28 * as this is not realized. 29 * 30 * Author: John S. Dyson 31 * Significant help during the development and debugging phases 32 * had been provided by David Greenman, also of the FreeBSD core team. 
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>	/* NOTE(review): duplicate of the <sys/proc.h> include above */

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;
int nvmio, nlru;		/* counts of buffers on the VMIO and LRU queues */

extern vm_map_t buffer_map, io_map, kernel_map, pager_map;

/* forward declarations for helpers defined later in this file */
void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_dirty_pages(struct buf * bp);
void vfs_busy_pages(struct buf *, int clear_modify);

/* set when a sleeper is waiting for any free buffer; cleared/woken in brelse() */
int needsbuffer;

/*
 * Internal update daemon, process 3
 * The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

/* current and maximum KVA-backed buffer memory, in bytes */
int bufspace, maxbufspace;

/*
 * Initialize buffer headers and related structures.
 */
/*
 * bufinit() -- called once at boot to set up the buffer cache:
 * empties the swap-buffer list and hash table, puts every buffer
 * header on QUEUE_EMPTY, and carves out per-buffer KVA from buffer_map.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* each buffer gets a fixed MAXBSIZE window of pageable kernel VA */
	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * this will change later!!!
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	/* reserve one page used to absorb I/O into already-valid pages */
	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object, bogus_offset - VM_MIN_KERNEL_ADDRESS, 0);

}

/*
 * remove the buffer from the appropriate free list
 * Caller must not hold the buffer busy on a queue inconsistently;
 * panics if the buffer is not on any queue.  Runs at splbio.
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_LRU)
			--nlru;	/* keep LRU population count in sync */
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
/*
 * bread() -- return (via *bpp) a buffer containing the given logical
 * block of vp, reading it synchronously if it is not cached.
 * Returns 0 on success or the error from biowait().
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		/* cache the read credential on the buffer for later retries */
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	} else if (bp->b_lblkno == bp->b_blkno) {
		/* cached but not yet translated: fill in the physical block */
		VOP_BMAP(vp, bp->b_lblkno, (struct vnode **) 0,
		    &bp->b_blkno, (int *) 0);
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;	/* remember to biowait() on the primary block */
	} else if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(vp, bp->b_lblkno, (struct vnode **) 0,
		    &bp->b_blkno, (int *) 0);
	}
	/* fire off async reads for each requested read-ahead block */
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;	/* already resident -- skip */
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc && curproc->p_stats)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 * NOTE(review): unlike bread(), the ru_oublock accounting here does not
 * check curproc->p_stats before dereferencing -- confirm curproc always
 * has p_stats on this path.
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if (oldflags & B_ASYNC) {
		if (oldflags & B_DELWRI) {
			/* buffer moves from dirty to clean list on the vnode */
			reassignbuf(bp, bp->b_vp);
		} else if (curproc) {
			++curproc->p_stats->p_ru.ru_oublock;
		}
	}
	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		/* synchronous write: wait for completion and release */
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		} else if (curproc) {
			++curproc->p_stats->p_ru.ru_oublock;
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

/*
 * VOP_BWRITE entry point: simply forwards to bwrite().
 */
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
/*
 * bdwrite() -- mark the buffer dirty (B_DELWRI) and release it without
 * starting I/O; the write happens later (update daemon or reuse).
 * Tape-like devices cannot reorder, so those go through bawrite().
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~B_READ;
	vfs_dirty_pages(bp);
	if ((bp->b_flags & B_DELWRI) == 0) {
		if (curproc)
			++curproc->p_stats->p_ru.ru_oublock;
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);	/* move to vnode's dirty list */
	}
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
#ifdef EVILFORNOW
	/*
	 * #ifdef EXTRA_DEADLOCKS is appropriate for this code for now :-)
	 */
	if (((bp->b_flags & B_DELWRI) == 0) && (bp->b_vp->v_numoutput > 24)) {
		int s = splbio();

		while (bp->b_vp->v_numoutput > 16) {
			bp->b_vp->v_flag |= VBWAIT;
			tsleep((caddr_t) &bp->b_vp->v_numoutput, PRIBIO, "bawnmo", 0);
		}
		splx(s);
	}
#endif
	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}

/*
 * Release a buffer.
 * Wakes waiters, updates the VM pages backing a B_VMIO buffer
 * (validity/cleanliness bookkeeping, bmapped release), then requeues
 * the header on the appropriate free list and clears the lock bits.
 * Runs under splbio for the queue manipulation.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);	/* cluster pseudo-buffers have their own pool */
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}
	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	/* junk contents or no storage: invalidate and detach from vnode */
	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		/* walk the backing pages, fixing up valid/clean state */
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				panic("brelse: bogus page found");
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
					vm_page_set_invalid(m, foff, resid);
				} else if ((bp->b_flags & B_DELWRI) == 0) {
					vm_page_set_clean(m, foff, resid);
					vm_page_set_valid(m, foff, resid);
				}
			} else {
				vm_page_test_dirty(m);
			}
			if (bp->b_flags & B_INVAL) {
				if (m->bmapped == 0) {
					panic("brelse: bmapped is zero for page\n");
				}
				--m->bmapped;
				if (m->bmapped == 0) {
					/* last buffer mapping gone: cache or activate */
					PAGE_WAKEUP(m);
					if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
						vm_page_cache(m);
					else if( (m->flags & PG_ACTIVE) == 0)
						vm_page_activate(m);
				}
			}
			foff += resid;
			iototal -= resid;
		}

		if (bp->b_flags & B_INVAL) {
			/* tear down the KVA mapping and forget the pages */
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
			--nvmio;
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		/* buffers with valid and quite potentially reuseable contents */
	} else {
		if (bp->b_flags & B_VMIO)
			bp->b_qindex = QUEUE_VMIO;
		else {
			bp->b_qindex = QUEUE_LRU;
			++nlru;
		}
		TAILQ_INSERT_TAIL(&bufqueues[bp->b_qindex], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE);
	splx(s);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...
504 */ 505void 506vfs_bio_awrite(struct buf * bp) 507{ 508 int i; 509 daddr_t lblkno = bp->b_lblkno; 510 struct vnode *vp = bp->b_vp; 511 int s; 512 int ncl; 513 struct buf *bpa; 514 515 s = splbio(); 516 if( vp->v_mount && (vp->v_flag & VVMIO) && 517 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 518 int size = vp->v_mount->mnt_stat.f_iosize; 519 520 for (i = 1; i < MAXPHYS / size; i++) { 521 if ((bpa = incore(vp, lblkno + i)) && 522 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_BUSY | B_CLUSTEROK | B_INVAL)) == B_DELWRI | B_CLUSTEROK) && 523 (bpa->b_bufsize == size)) { 524 if ((bpa->b_blkno == bpa->b_lblkno) || 525 (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE)) 526 break; 527 } else { 528 break; 529 } 530 } 531 ncl = i; 532 /* 533 * this is a possible cluster write 534 */ 535 if (ncl != 1) { 536 cluster_wbuild(vp, NULL, size, lblkno, ncl, -1); 537 splx(s); 538 return; 539 } 540 } 541 /* 542 * default (old) behavior, writing out only one block 543 */ 544 bremfree(bp); 545 bp->b_flags |= B_BUSY | B_ASYNC; 546 bwrite(bp); 547 splx(s); 548} 549 550 551/* 552 * Find a buffer header which is available for use. 553 */ 554struct buf * 555getnewbuf(int slpflag, int slptimeo, int doingvmio) 556{ 557 struct buf *bp; 558 int s; 559 int firstbp = 1; 560 561 s = splbio(); 562start: 563 if (bufspace >= maxbufspace) 564 goto trytofreespace; 565 566 /* can we constitute a new buffer? 
*/ 567 if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) { 568 if (bp->b_qindex != QUEUE_EMPTY) 569 panic("getnewbuf: inconsistent EMPTY queue"); 570 bremfree(bp); 571 goto fillbuf; 572 } 573trytofreespace: 574 /* 575 * we keep the file I/O from hogging metadata I/O 576 */ 577 if (bp = bufqueues[QUEUE_AGE].tqh_first) { 578 if (bp->b_qindex != QUEUE_AGE) 579 panic("getnewbuf: inconsistent AGE queue"); 580 } else if ((nvmio > (nbuf / 2)) 581 && (bp = bufqueues[QUEUE_VMIO].tqh_first)) { 582 if (bp->b_qindex != QUEUE_VMIO) 583 panic("getnewbuf: inconsistent VMIO queue"); 584 } else if ((!doingvmio || (nlru > (nbuf / 2))) && 585 (bp = bufqueues[QUEUE_LRU].tqh_first)) { 586 if (bp->b_qindex != QUEUE_LRU) 587 panic("getnewbuf: inconsistent LRU queue"); 588 } 589 if (!bp) { 590 if (doingvmio) { 591 if (bp = bufqueues[QUEUE_VMIO].tqh_first) { 592 if (bp->b_qindex != QUEUE_VMIO) 593 panic("getnewbuf: inconsistent VMIO queue"); 594 } else if (bp = bufqueues[QUEUE_LRU].tqh_first) { 595 if (bp->b_qindex != QUEUE_LRU) 596 panic("getnewbuf: inconsistent LRU queue"); 597 } 598 } else { 599 if (bp = bufqueues[QUEUE_LRU].tqh_first) { 600 if (bp->b_qindex != QUEUE_LRU) 601 panic("getnewbuf: inconsistent LRU queue"); 602 } else if (bp = bufqueues[QUEUE_VMIO].tqh_first) { 603 if (bp->b_qindex != QUEUE_VMIO) 604 panic("getnewbuf: inconsistent VMIO queue"); 605 } 606 } 607 } 608 if (!bp) { 609 /* wait for a free buffer of any kind */ 610 needsbuffer = 1; 611 tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo); 612 splx(s); 613 return (0); 614 } 615 /* if we are a delayed write, convert to an async write */ 616 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 617 vfs_bio_awrite(bp); 618 if (!slpflag && !slptimeo) { 619 splx(s); 620 return (0); 621 } 622 goto start; 623 } 624 bremfree(bp); 625 626 if (bp->b_flags & B_VMIO) { 627 bp->b_flags |= B_INVAL | B_BUSY; 628 brelse(bp); 629 bremfree(bp); 630 } 631 if (bp->b_vp) 632 brelvp(bp); 633 634 /* we are not free, nor do we 
contain interesting data */ 635 if (bp->b_rcred != NOCRED) 636 crfree(bp->b_rcred); 637 if (bp->b_wcred != NOCRED) 638 crfree(bp->b_wcred); 639fillbuf: 640 bp->b_flags |= B_BUSY; 641 LIST_REMOVE(bp, b_hash); 642 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 643 splx(s); 644 if (bp->b_bufsize) { 645 allocbuf(bp, 0, 0); 646 } 647 bp->b_flags = B_BUSY; 648 bp->b_dev = NODEV; 649 bp->b_vp = NULL; 650 bp->b_blkno = bp->b_lblkno = 0; 651 bp->b_iodone = 0; 652 bp->b_error = 0; 653 bp->b_resid = 0; 654 bp->b_bcount = 0; 655 bp->b_npages = 0; 656 bp->b_wcred = bp->b_rcred = NOCRED; 657 bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE; 658 bp->b_dirtyoff = bp->b_dirtyend = 0; 659 bp->b_validoff = bp->b_validend = 0; 660 if (bufspace >= maxbufspace) { 661 s = splbio(); 662 bp->b_flags |= B_INVAL; 663 brelse(bp); 664 goto trytofreespace; 665 } 666 return (bp); 667} 668 669/* 670 * Check to see if a block is currently memory resident. 671 */ 672struct buf * 673incore(struct vnode * vp, daddr_t blkno) 674{ 675 struct buf *bp; 676 struct bufhashhdr *bh; 677 678 int s = splbio(); 679 680 bh = BUFHASH(vp, blkno); 681 bp = bh->lh_first; 682 683 /* Search hash chain */ 684 while (bp) { 685 /* hit */ 686 if (bp->b_lblkno == blkno && bp->b_vp == vp 687 && (bp->b_flags & B_INVAL) == 0) { 688 splx(s); 689 return (bp); 690 } 691 bp = bp->b_hash.le_next; 692 } 693 splx(s); 694 695 return (0); 696} 697 698/* 699 * returns true if no I/O is needed to access the 700 * associated VM object. 
701 */ 702 703int 704inmem(struct vnode * vp, daddr_t blkno) 705{ 706 vm_object_t obj; 707 vm_offset_t off, toff, tinc; 708 vm_page_t m; 709 710 if (incore(vp, blkno)) 711 return 1; 712 if (vp->v_mount == 0) 713 return 0; 714 if ((vp->v_vmdata == 0) || (vp->v_flag & VVMIO) == 0) 715 return 0; 716 717 obj = (vm_object_t) vp->v_vmdata; 718 tinc = PAGE_SIZE; 719 if (tinc > vp->v_mount->mnt_stat.f_iosize) 720 tinc = vp->v_mount->mnt_stat.f_iosize; 721 off = blkno * vp->v_mount->mnt_stat.f_iosize; 722 723 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 724 int mask; 725 726 m = vm_page_lookup(obj, trunc_page(toff + off)); 727 if (!m) 728 return 0; 729 if (vm_page_is_valid(m, toff + off, tinc) == 0) 730 return 0; 731 } 732 return 1; 733} 734 735/* 736 * Get a block given a specified block and offset into a file/device. 737 */ 738struct buf * 739getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 740{ 741 struct buf *bp; 742 int s; 743 struct bufhashhdr *bh; 744 vm_offset_t off; 745 int nleft; 746 747 s = splbio(); 748loop: 749 if ((cnt.v_free_count + cnt.v_cache_count) < 750 cnt.v_free_reserved + MAXBSIZE / PAGE_SIZE) 751 wakeup((caddr_t) &vm_pages_needed); 752 if (bp = incore(vp, blkno)) { 753 if (bp->b_flags & B_BUSY) { 754 bp->b_flags |= B_WANTED; 755 if (curproc == pageproc) { 756 bp->b_flags |= B_PDWANTED; 757 wakeup((caddr_t) &cnt.v_free_count); 758 } 759 if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo)) 760 goto loop; 761 splx(s); 762 return (struct buf *) NULL; 763 } 764 bp->b_flags |= B_BUSY | B_CACHE; 765 bremfree(bp); 766 /* 767 * check for size inconsistancies 768 */ 769 if (bp->b_bcount != size) { 770#if defined(VFS_BIO_DEBUG) 771 printf("getblk: invalid buffer size: %ld\n", bp->b_bcount); 772#endif 773 bp->b_flags |= B_INVAL; 774 bwrite(bp); 775 goto loop; 776 } 777 splx(s); 778 return (bp); 779 } else { 780 vm_object_t obj; 781 int doingvmio; 782 783 if ((obj = (vm_object_t) vp->v_vmdata) && 
(vp->v_flag & VVMIO)) { 784 doingvmio = 1; 785 } else { 786 doingvmio = 0; 787 } 788 if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) { 789 if (slpflag || slptimeo) 790 return NULL; 791 goto loop; 792 } 793 if (incore(vp, blkno)) { 794 bp->b_flags |= B_INVAL; 795 brelse(bp); 796 goto loop; 797 } 798 bp->b_blkno = bp->b_lblkno = blkno; 799 bgetvp(vp, bp); 800 LIST_REMOVE(bp, b_hash); 801 bh = BUFHASH(vp, blkno); 802 LIST_INSERT_HEAD(bh, bp, b_hash); 803 if (doingvmio) { 804 bp->b_flags |= (B_VMIO | B_CACHE); 805#if defined(VFS_BIO_DEBUG) 806 if (vp->v_type != VREG) 807 printf("getblk: vmioing file type %d???\n", vp->v_type); 808#endif 809 ++nvmio; 810 } else { 811 if (bp->b_flags & B_VMIO) 812 --nvmio; 813 bp->b_flags &= ~B_VMIO; 814 } 815 splx(s); 816 if (!allocbuf(bp, size, 1)) { 817 s = splbio(); 818 goto loop; 819 } 820 return (bp); 821 } 822} 823 824/* 825 * Get an empty, disassociated buffer of given size. 826 */ 827struct buf * 828geteblk(int size) 829{ 830 struct buf *bp; 831 832 while ((bp = getnewbuf(0, 0, 0)) == 0); 833 allocbuf(bp, size, 0); 834 bp->b_flags |= B_INVAL; 835 return (bp); 836} 837 838/* 839 * Modify the length of a buffer's underlying buffer storage without 840 * destroying information (unless, of course the buffer is shrinking). 
 */
/*
 * allocbuf() -- grow or shrink the storage behind bp to hold 'size'
 * bytes.  Non-VMIO buffers use wired kernel pages via
 * vm_hold_load_pages/vm_hold_free_pages; VMIO buffers map pages of the
 * vnode's VM object into the buffer's KVA window.  Returns 1 on
 * success, 0 if a VMIO buffer had to be destroyed (pageout wanted it).
 * NOTE(review): 'mbsize' in the non-VMIO branch and the 'mask'/'pb'
 * locals in the VMIO branch are computed/declared but never used.
 */
int
allocbuf(struct buf * bp, int size, int vmio)
{

	int s;
	int newbsize, mbsize;
	int i;

	if ((bp->b_flags & B_VMIO) == 0) {
		mbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		newbsize = round_page(size);

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			/* shrinking: free the tail pages */
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
			bufspace -= (bp->b_bufsize - newbsize);
		} else if (newbsize > bp->b_bufsize) {
			/* growing: wire fresh pages onto the tail */
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			bufspace += (newbsize - bp->b_bufsize);
		}
		/*
		 * adjust buffer cache's idea of memory allocated to buffer
		 * contents
		 */
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				/* unmap and drop the pages past the new end */
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					/* wait for any I/O on the page to drain first */
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						PAGE_WAKEUP(m);
						if (m->valid == 0) {
							pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
							vm_page_free(m);
						}
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
				bufspace -= (bp->b_bufsize - newbsize);
			}
		} else {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = (vm_object_t) vp->v_vmdata;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
	doretry:
				/*
				 * collect object pages covering [off, off+newbsize);
				 * any sleep restarts the scan from the buffer's
				 * current page count (goto doretry).
				 */
				for (toff = 0; toff < newbsize; toff += tinc) {
					int mask;
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {
						int pb;

						/* page already attached to the buffer */
						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0)
							vm_page_activate(m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, 0);
						if (!m) {
							int j;

							/* allocation failed: release pages gathered
							 * so far, wait for memory, and retry */
							for (j = bp->b_npages; j < pageindex; j++) {
								vm_page_t mt = bp->b_pages[j];

								PAGE_WAKEUP(mt);
								if (mt->valid == 0 && mt->bmapped == 0) {
									vm_page_free(mt);
								}
							}
							VM_WAIT;
							if (vmio && (bp->b_flags & B_PDWANTED)) {
								/* pageout daemon wants this buffer: give it up */
								--nvmio;
								bp->b_flags &= ~B_VMIO;
								bp->b_flags |= B_INVAL;
								brelse(bp);
								return 0;
							}
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						m->valid = 0;
						vm_page_activate(m);
					} else if ((m->valid == 0) || (m->flags & PG_BUSY)) {
						int j;
						int bufferdestroyed = 0;

						for (j = bp->b_npages; j < pageindex; j++) {
							vm_page_t mt = bp->b_pages[j];

							PAGE_WAKEUP(mt);
							if (mt->valid == 0 && mt->bmapped == 0) {
								vm_page_free(mt);
							}
						}
						if (vmio && (bp->b_flags & B_PDWANTED)) {
							--nvmio;
							bp->b_flags &= ~B_VMIO;
							bp->b_flags |= B_INVAL;
							brelse(bp);
							VM_WAIT;
							bufferdestroyed = 1;
						}
						s = splbio();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PRIBIO, "pgtblk", 0);
						} else if( m->valid == 0 && m->bmapped == 0) {
							vm_page_free(m);
						}
						splx(s);
						if (bufferdestroyed)
							return 0;
						curbpnpages = bp->b_npages;
						goto doretry;
					} else {
						int pb;

						/* avoid stealing cached pages when memory is low */
						if ((m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								vm_page_t mt = bp->b_pages[j];

								PAGE_WAKEUP(mt);
								if (mt->valid == 0 && mt->bmapped == 0) {
									vm_page_free(mt);
								}
							}
							VM_WAIT;
							if (vmio && (bp->b_flags & B_PDWANTED)) {
								--nvmio;
								bp->b_flags &= ~B_VMIO;
								bp->b_flags |= B_INVAL;
								brelse(bp);
								return 0;
							}
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0)
							vm_page_activate(m);
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (bsize >= PAGE_SIZE) {
					for (i = bp->b_npages; i < curbpnpages; i++) {
						m = bp->b_pages[i];
						if (m->valid == 0) {
							bp->b_flags &= ~B_CACHE;
						}
						m->bmapped++;
						PAGE_WAKEUP(m);
					}
#if 0
					if( bp->b_flags & B_CACHE) {
						for (i = bp->b_npages; i < curbpnpages; i++) {
							bp->b_pages[i]->flags |= PG_REFERENCED;
						}
					}
#endif
				} else {
					/* sub-page block: validity check against the one page */
					if (!vm_page_is_valid(bp->b_pages[0], off, bsize))
						bp->b_flags &= ~B_CACHE;
					bp->b_pages[0]->bmapped++;
					PAGE_WAKEUP(bp->b_pages[0]);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				/* point b_data at the block's offset within the first page */
				bp->b_data += off % PAGE_SIZE;
			}
			bufspace += (newbsize - bp->b_bufsize);
		}
	}
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 * On error the buffer is invalidated and rehashed onto invalhash.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
	if ((bp->b_flags & B_ERROR) || bp->b_error) {
		if ((bp->b_flags & B_INVAL) == 0) {
			bp->b_flags |= B_INVAL;
			bp->b_dev = NODEV;
			LIST_REMOVE(bp, b_hash);
			LIST_INSERT_HEAD(&invalhash, bp, b_hash);
			wakeup((caddr_t) bp);
		}
		/* guarantee both b_error and B_ERROR are set for the caller */
		if (!bp->b_error)
			bp->b_error = EIO;
		else
			bp->b_flags |= B_ERROR;
		splx(s);
		return (bp->b_error);
	} else {
		splx(s);
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
1122 */ 1123void 1124biodone(register struct buf * bp) 1125{ 1126 int s; 1127 1128 s = splbio(); 1129 if (bp->b_flags & B_DONE) 1130 printf("biodone: buffer already done\n"); 1131 bp->b_flags |= B_DONE; 1132 1133 if ((bp->b_flags & B_READ) == 0) { 1134 vwakeup(bp); 1135 } 1136#ifdef BOUNCE_BUFFERS 1137 if (bp->b_flags & B_BOUNCE) 1138 vm_bounce_free(bp); 1139#endif 1140 1141 /* call optional completion function if requested */ 1142 if (bp->b_flags & B_CALL) { 1143 bp->b_flags &= ~B_CALL; 1144 (*bp->b_iodone) (bp); 1145 splx(s); 1146 return; 1147 } 1148 if (bp->b_flags & B_VMIO) { 1149 int i, resid; 1150 vm_offset_t foff; 1151 vm_page_t m; 1152 vm_object_t obj; 1153 int iosize; 1154 struct vnode *vp = bp->b_vp; 1155 1156 foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1157 obj = (vm_object_t) vp->v_vmdata; 1158 if (!obj) { 1159 return; 1160 } 1161#if defined(VFS_BIO_DEBUG) 1162 if (obj->paging_in_progress < bp->b_npages) { 1163 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 1164 obj->paging_in_progress, bp->b_npages); 1165 } 1166#endif 1167 iosize = bp->b_bufsize; 1168 for (i = 0; i < bp->b_npages; i++) { 1169 m = bp->b_pages[i]; 1170 if (m == bogus_page) { 1171 m = vm_page_lookup(obj, foff); 1172 if (!m) { 1173#if defined(VFS_BIO_DEBUG) 1174 printf("biodone: page disappeared\n"); 1175#endif 1176 --obj->paging_in_progress; 1177 continue; 1178 } 1179 bp->b_pages[i] = m; 1180 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1181 } 1182#if defined(VFS_BIO_DEBUG) 1183 if (trunc_page(foff) != m->offset) { 1184 printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset); 1185 } 1186#endif 1187 resid = (m->offset + PAGE_SIZE) - foff; 1188 if (resid > iosize) 1189 resid = iosize; 1190 if (resid > 0) { 1191 vm_page_set_valid(m, foff, resid); 1192 vm_page_set_clean(m, foff, resid); 1193 } 1194 if (m->busy == 0) { 1195 printf("biodone: page busy < 0, off: %d, foff: %d, resid: %d, index: %d\n", 1196 m->offset, foff, resid, i); 1197 
printf(" iosize: %d, lblkno: %d\n", 1198 bp->b_vp->v_mount->mnt_stat.f_iosize, bp->b_lblkno); 1199 printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n", 1200 m->valid, m->dirty, m->bmapped); 1201 panic("biodone: page busy < 0\n"); 1202 } 1203 --m->busy; 1204 PAGE_WAKEUP(m); 1205 --obj->paging_in_progress; 1206 foff += resid; 1207 iosize -= resid; 1208 } 1209 if (obj && obj->paging_in_progress == 0) 1210 wakeup((caddr_t) obj); 1211 } 1212 /* 1213 * For asynchronous completions, release the buffer now. The brelse 1214 * checks for B_WANTED and will do the wakeup there if necessary - so 1215 * no need to do a wakeup here in the async case. 1216 */ 1217 1218 if (bp->b_flags & B_ASYNC) { 1219 brelse(bp); 1220 } else { 1221 bp->b_flags &= ~(B_WANTED | B_PDWANTED); 1222 wakeup((caddr_t) bp); 1223 } 1224 splx(s); 1225} 1226 1227int 1228count_lock_queue() 1229{ 1230 int count; 1231 struct buf *bp; 1232 1233 count = 0; 1234 for (bp = bufqueues[QUEUE_LOCKED].tqh_first; 1235 bp != NULL; 1236 bp = bp->b_freelist.tqe_next) 1237 count++; 1238 return (count); 1239} 1240 1241int vfs_update_interval = 30; 1242 1243void 1244vfs_update() 1245{ 1246 (void) spl0(); 1247 while (1) { 1248 tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update", 1249 hz * vfs_update_interval); 1250 vfs_update_wakeup = 0; 1251 sync(curproc, NULL, NULL); 1252 } 1253} 1254 1255void 1256vfs_unbusy_pages(struct buf * bp) 1257{ 1258 int i; 1259 1260 if (bp->b_flags & B_VMIO) { 1261 struct vnode *vp = bp->b_vp; 1262 vm_object_t obj = (vm_object_t) vp->v_vmdata; 1263 vm_offset_t foff; 1264 1265 foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1266 1267 for (i = 0; i < bp->b_npages; i++) { 1268 vm_page_t m = bp->b_pages[i]; 1269 1270 if (m == bogus_page) { 1271 m = vm_page_lookup(obj, foff); 1272 if (!m) { 1273 panic("vfs_unbusy_pages: page missing\n"); 1274 } 1275 bp->b_pages[i] = m; 1276 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1277 } 1278 --obj->paging_in_progress; 1279 --m->busy; 1280 
PAGE_WAKEUP(m); 1281 } 1282 if (obj->paging_in_progress == 0) 1283 wakeup((caddr_t) obj); 1284 } 1285} 1286 1287void 1288vfs_busy_pages(struct buf * bp, int clear_modify) 1289{ 1290 int i; 1291 1292 if (bp->b_flags & B_VMIO) { 1293 vm_object_t obj = (vm_object_t) bp->b_vp->v_vmdata; 1294 vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1295 int iocount = bp->b_bufsize; 1296 1297 for (i = 0; i < bp->b_npages; i++) { 1298 vm_page_t m = bp->b_pages[i]; 1299 int resid = (m->offset + PAGE_SIZE) - foff; 1300 1301 if (resid > iocount) 1302 resid = iocount; 1303 obj->paging_in_progress++; 1304 m->busy++; 1305 if (clear_modify) { 1306 vm_page_test_dirty(m); 1307 pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ); 1308 } else if (bp->b_bcount >= PAGE_SIZE) { 1309 if (m->valid && (bp->b_flags & B_CACHE) == 0) { 1310 bp->b_pages[i] = bogus_page; 1311 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1312 } 1313 } 1314 foff += resid; 1315 iocount -= resid; 1316 } 1317 } 1318} 1319 1320void 1321vfs_dirty_pages(struct buf * bp) 1322{ 1323 int i; 1324 1325 if (bp->b_flags & B_VMIO) { 1326 vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno; 1327 int iocount = bp->b_bufsize; 1328 1329 for (i = 0; i < bp->b_npages; i++) { 1330 vm_page_t m = bp->b_pages[i]; 1331 int resid = (m->offset + PAGE_SIZE) - foff; 1332 1333 if (resid > iocount) 1334 resid = iocount; 1335 if (resid > 0) { 1336 vm_page_set_valid(m, foff, resid); 1337 vm_page_set_dirty(m, foff, resid); 1338 } 1339 PAGE_WAKEUP(m); 1340 foff += resid; 1341 iocount -= resid; 1342 } 1343 } 1344} 1345/* 1346 * these routines are not in the correct place (yet) 1347 * also they work *ONLY* for kernel_pmap!!! 
 */
/*
 * vm_hold_load_pages() -- back the KVA range [froma, toa) of a
 * non-VMIO buffer with freshly allocated, wired kernel pages.
 * Blocks (VM_WAIT) until memory is available.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

tryagain0:
	/* non-pageout processes must leave some reserve for the pagedaemon */
	if ((curproc != pageproc) && ((cnt.v_free_count + cnt.v_cache_count) <=
	    cnt.v_free_reserved + (toa - froma) / PAGE_SIZE)) {
		VM_WAIT;
		goto tryagain0;
	}
	for (pg = from; pg < to; pg += PAGE_SIZE) {

tryagain:

		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS, 0);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

/*
 * vm_hold_free_pages() -- inverse of vm_hold_load_pages: unmap and
 * free the wired pages backing the KVA range [froma, toa).
 */
void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}

/* empty placeholder for buffer-cache statistics reporting */
void
bufstats()
{
}