vfs_bio.c revision 7404
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD. Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.37 1995/03/26 23:28:50 davidg Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme. Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * has been provided by David Greenman, also of the FreeBSD core team.
 */

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

struct buf *buf;		/* buffer header pool */
int nbuf;			/* number of buffer headers calculated
				 * elsewhere */
struct swqueue bswlist;

void vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to);
void vfs_dirty_pages(struct buf * bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;

/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad. it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
vm_offset_t bogus_offset;

int bufspace, maxbufspace;

/*
 * advisory minimum for size of LRU queue or VMIO queue
 */
int minbuf;
/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_vp = NULL;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
	/*
	 * this will change later!!!
	 */
	minbuf = nbuf / 3;
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
	    bogus_offset - VM_MIN_KERNEL_ADDRESS, VM_ALLOC_NORMAL);
}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data. Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
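
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * synchronous read path built on bread()/brelse(). The vnode "vp",
 * logical block "lbn", block size "bsize" and destination "dest" are
 * hypothetical.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(bp->b_data, dest, bsize);
 *	brelse(bp);
 */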
/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc && curproc->p_stats)	/* count block I/O */
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc && curproc->p_stats)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion. (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if (oldflags & B_ASYNC) {
		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		} else if (curproc) {
			++curproc->p_stats->p_ru.ru_oublock;
		}
	}
	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		} else if (curproc) {
			++curproc->p_stats->p_ru.ru_oublock;
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~B_READ;
	vfs_dirty_pages(bp);
	if ((bp->b_flags & B_DELWRI) == 0) {
		if (curproc)
			++curproc->p_stats->p_ru.ru_oublock;
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL);
	}
	brelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{

	bp->b_flags |= B_ASYNC;
	(void) bwrite(bp);
}
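
/*
 * Usage sketch (illustrative, not part of the original file): the three
 * write flavors above trade latency against durability. For a
 * hypothetical modified metadata buffer "bp":
 *
 *	if (must_reach_disk_now)
 *		error = bwrite(bp);	(synchronous: biowait, then brelse)
 *	else if (more_nearby_writes_expected)
 *		bdwrite(bp);		(delayed: marks B_DELWRI, flushed later)
 *	else
 *		bawrite(bp);		(async: B_ASYNC, released by biodone)
 */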
/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	if (needsbuffer) {
		needsbuffer = 0;
		wakeup((caddr_t) &needsbuffer);
	}
	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_AGE);
		wakeup((caddr_t) bp);
	} else if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp)
			brelvp(bp);
	}
	/*
	 * VMIO buffer rundown. It is not necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around. The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		int iototal = bp->b_bufsize;

		foff = 0;
		obj = 0;
		if (bp->b_npages) {
			if (bp->b_vp && bp->b_vp->v_mount) {
				foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			} else {
				/*
				 * vnode pointer has been ripped away --
				 * probably file gone...
				 */
				foff = bp->b_pages[0]->offset;
			}
		}
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				panic("brelse: bogus page found");
			}
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iototal)
				resid = iototal;
			if (resid > 0) {
				if (bp->b_flags & (B_ERROR | B_NOCACHE)) {
					vm_page_set_invalid(m, foff, resid);
				} else if ((bp->b_flags & B_DELWRI) == 0) {
					vm_page_set_clean(m, foff, resid);
					vm_page_set_valid(m, foff, resid);
				}
			} else {
				vm_page_test_dirty(m);
			}
			foff += resid;
			iototal -= resid;
		}

		if (bp->b_flags & B_INVAL) {
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				--m->bmapped;
				if (m->bmapped == 0) {
					PAGE_WAKEUP(m);
					if (m->valid == 0) {
						vm_page_protect(m, VM_PROT_NONE);
						vm_page_free(m);
					} else if ((m->dirty & m->valid) == 0 &&
					    (m->flags & PG_REFERENCED) == 0 &&
					    !pmap_is_referenced(VM_PAGE_TO_PHYS(m)))
						vm_page_cache(m);
					else if ((m->flags & PG_ACTIVE) == 0) {
						vm_page_activate(m);
						m->act_count = 0;
					}
				}
			}
			bufspace -= bp->b_bufsize;
			pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
			bp->b_npages = 0;
			bp->b_bufsize = 0;
			bp->b_flags &= ~B_VMIO;
			if (bp->b_vp)
				brelvp(bp);
		}
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	/* unlock */
	bp->b_flags &= ~(B_PDWANTED | B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE);
	splx(s);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers... This is much better
 * than the old way of writing only one buffer at a time.
 */
void
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;

	s = splbio();
	if (vp->v_mount && (vp->v_flag & VVMIO) &&
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size = vp->v_mount->mnt_stat.f_iosize;

		for (i = 1; i < MAXPHYS / size; i++) {
			if ((bpa = incore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
				(B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + (i * size) / DEV_BSIZE))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			bremfree(bp);
			cluster_wbuild(vp, bp, size, lblkno, ncl, -1);
			splx(s);
			return;
		}
	}
	/*
	 * default (old) behavior, writing out only one block
	 */
	bremfree(bp);
	bp->b_flags |= B_BUSY | B_ASYNC;
	bwrite(bp);
	splx(s);
}
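
/*
 * Worked example (illustrative, not part of the original file): with a
 * 64K MAXPHYS and an 8K f_iosize, the scan above examines logical
 * blocks lblkno + 1 through lblkno + 7. A candidate at lblkno + i is
 * clusterable only when it is physically contiguous, i.e.
 *
 *	bpa->b_blkno == bp->b_blkno + (i * 8192) / DEV_BSIZE
 *	             == bp->b_blkno + i * 16	(DEV_BSIZE == 512)
 *
 * so a full cluster hands cluster_wbuild() up to eight dirty 8K
 * buffers to write with a single strategy call.
 */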
/*
 * Find a buffer header which is available for use.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int s;

	s = splbio();
start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = bufqueues[QUEUE_EMPTY].tqh_first)) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue");
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = bufqueues[QUEUE_AGE].tqh_first)) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue");
	} else if ((bp = bufqueues[QUEUE_LRU].tqh_first)) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue");
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep((caddr_t) &needsbuffer, PRIBIO | slpflag, "newbuf", slptimeo);
		splx(s);
		return (0);
	}
	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			splx(s);
			return (0);
		}
		goto start;
	}
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	bremfree(bp);

	if (bp->b_flags & B_VMIO) {
		bp->b_flags |= B_INVAL | B_BUSY;
		brelse(bp);
		bremfree(bp);
	}
	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
fillbuf:
	bp->b_flags |= B_BUSY;
	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	splx(s);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	if (bufspace >= maxbufspace) {
		s = splbio();
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;
	int s = splbio();

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp) {
		/* hit */
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    (bp->b_flags & B_INVAL) == 0) {
			splx(s);
			return (bp);
		}
		bp = bp->b_hash.le_next;
	}
	splx(s);

	return (0);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object. This is like incore except
 * it also hunts around in the VM system for the data.
 */
int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t off, toff, tinc;
	vm_page_t m;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == 0)
		return 0;
	if ((vp->v_vmdata == 0) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = (vm_object_t) vp->v_vmdata;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
		m = vm_page_lookup(obj, trunc_page(toff + off));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, toff + off, tinc) == 0)
			return 0;
	}
	return 1;
}
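
/*
 * Worked example (illustrative, not part of the original file):
 * assuming a 4K PAGE_SIZE and an 8K f_iosize, inmem(vp, 5) computes
 * off = 5 * 8192 = 40960 and probes the VM object at page offsets
 * 40960 and 45056 (tinc == PAGE_SIZE). Both pages must be found by
 * vm_page_lookup() and fully valid per vm_page_is_valid() for the
 * block to be reported resident without any buffer I/O.
 */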
/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_cache_min)
		pagedaemon_wakeup();

	if ((bp = incore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (curproc == pageproc) {
				bp->b_flags |= B_PDWANTED;
				wakeup((caddr_t) &cnt.v_free_count);
			}
			if (!tsleep((caddr_t) bp, PRIBIO | slpflag, "getblk", slptimeo))
				goto loop;
			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);
		/*
		 * check for size inconsistencies
		 */
		if (bp->b_bcount != size) {
#if defined(VFS_BIO_DEBUG)
			printf("getblk: invalid buffer size: %ld\n", bp->b_bcount);
#endif
			bp->b_flags |= B_INVAL;
			bwrite(bp);
			goto loop;
		}
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = (vm_object_t) vp->v_vmdata) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}
		/*
		 * It is possible that another buffer has been constituted
		 * during the time that getnewbuf is blocked. This checks
		 * for this possibility, and handles it.
		 */
		if (incore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}
		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		if (!allocbuf(bp, size)) {
			s = splbio();
			goto loop;
		}
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}
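
/*
 * Usage sketch (illustrative, not part of the original file): geteblk()
 * yields a buffer that incore() can never find, useful for transient,
 * vnode-independent transfers:
 *
 *	bp = geteblk(size);
 *	... fill in bp->b_data, point bp at a device and block ...
 *	... issue the transfer and biowait(bp) ...
 *	brelse(bp);	(B_INVAL is already set, so the header and its
 *			 memory go straight back to the age/empty queues)
 */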
/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations. Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{
	int s;
	int newbsize;
	int i;
	int vmio = (bp->b_flags & B_VMIO) != 0;

	if (!vmio) {
		/*
		 * Just get anonymous memory from the kernel
		 */
		newbsize = round_page(size);

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
			bufspace -= (bp->b_bufsize - newbsize);
		} else if (newbsize > bp->b_bufsize) {
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
			bufspace += (newbsize - bp->b_bufsize);
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = ((size + DEV_BSIZE - 1) / DEV_BSIZE) * DEV_BSIZE;
		desiredpages = round_page(newbsize) / PAGE_SIZE;

		if (newbsize == bp->b_bufsize) {
			bp->b_bcount = size;
			return 1;
		} else if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    desiredpages * PAGE_SIZE, (bp->b_npages - desiredpages));
				for (i = desiredpages; i < bp->b_npages; i++) {
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					if (m->bmapped == 0) {
						printf("allocbuf: bmapped is zero for page %d\n", i);
						panic("allocbuf: error");
					}
					--m->bmapped;
					if (m->bmapped == 0) {
						PAGE_WAKEUP(m);
						if (m->valid == 0) {
							vm_page_protect(m, VM_PROT_NONE);
							vm_page_free(m);
						}
					}
					bp->b_pages[i] = NULL;
				}
				bp->b_npages = desiredpages;
				bufspace -= (bp->b_bufsize - newbsize);
			}
		} else {
			vm_object_t obj;
			vm_offset_t tinc, off, toff, objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;
			bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = (vm_object_t) vp->v_vmdata;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
	doretry:
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff / PAGE_SIZE;
					objoff = trunc_page(toff + off);
					if (pageindex < curbpnpages) {
						m = bp->b_pages[pageindex];
						if (m->offset != objoff)
							panic("allocbuf: page changed offset??!!!?");
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								vm_page_t mt = bp->b_pages[j];

								PAGE_WAKEUP(mt);
								if (mt->valid == 0 && mt->bmapped == 0) {
									vm_page_free(mt);
								}
							}
							VM_WAIT;
							if (bp->b_flags & B_PDWANTED) {
								bp->b_flags |= B_INVAL;
								brelse(bp);
								return 0;
							}
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						m->valid = 0;
						vm_page_activate(m);
						m->act_count = 0;
					} else if ((m->valid == 0) ||
					    (m->flags & PG_BUSY)) {
						int j;
						int bufferdestroyed = 0;

						for (j = bp->b_npages; j < pageindex; j++) {
							vm_page_t mt = bp->b_pages[j];

							PAGE_WAKEUP(mt);
							if (mt->valid == 0 && mt->bmapped == 0) {
								vm_page_free(mt);
							}
						}
						if (bp->b_flags & B_PDWANTED) {
							bp->b_flags |= B_INVAL;
							brelse(bp);
							VM_WAIT;
							bufferdestroyed = 1;
						}
						s = splbio();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PRIBIO, "pgtblk", 0);
						} else if (m->valid == 0 && m->bmapped == 0) {
							vm_page_free(m);
						}
						splx(s);
						if (bufferdestroyed)
							return 0;
						curbpnpages = bp->b_npages;
						goto doretry;
					} else {
						if ((m->flags & PG_CACHE) &&
						    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
							int j;

							for (j = bp->b_npages; j < pageindex; j++) {
								vm_page_t mt = bp->b_pages[j];

								PAGE_WAKEUP(mt);
								if (mt->valid == 0 && mt->bmapped == 0) {
									vm_page_free(mt);
								}
							}
							VM_WAIT;
							if (bp->b_flags & B_PDWANTED) {
								bp->b_flags |= B_INVAL;
								brelse(bp);
								return 0;
							}
							curbpnpages = bp->b_npages;
							goto doretry;
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (!vm_page_is_valid(m, toff + off, bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						if ((m->flags & PG_ACTIVE) == 0) {
							vm_page_activate(m);
							m->act_count = 0;
						}
						m->flags |= PG_BUSY;
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (bsize >= PAGE_SIZE) {
					for (i = bp->b_npages; i < curbpnpages; i++) {
						m = bp->b_pages[i];
						if (m->valid == 0) {
							bp->b_flags &= ~B_CACHE;
						}
						m->bmapped++;
						PAGE_WAKEUP(m);
					}
				} else {
					if (!vm_page_is_valid(bp->b_pages[0], off, bsize))
						bp->b_flags &= ~B_CACHE;
					bp->b_pages[0]->bmapped++;
					PAGE_WAKEUP(bp->b_pages[0]);
				}
				bp->b_npages = curbpnpages;
				bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
				pmap_qenter((vm_offset_t) bp->b_data, bp->b_pages, bp->b_npages);
				bp->b_data += off % PAGE_SIZE;
			}
			bufspace += (newbsize - bp->b_bufsize);
		}
	}
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep((caddr_t) bp, PRIBIO, "biowait", 0);
	if ((bp->b_flags & B_ERROR) || bp->b_error) {
		if ((bp->b_flags & B_INVAL) == 0) {
			bp->b_flags |= B_INVAL;
			bp->b_dev = NODEV;
			LIST_REMOVE(bp, b_hash);
			LIST_INSERT_HEAD(&invalhash, bp, b_hash);
			wakeup((caddr_t) bp);
		}
		if (!bp->b_error)
			bp->b_error = EIO;
		else
			bp->b_flags |= B_ERROR;
		splx(s);
		return (bp->b_error);
	} else {
		splx(s);
		return (0);
	}
}
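
/*
 * Usage sketch (illustrative, not part of the original file): biowait()
 * is the synchronous half of the strategy protocol; bread() above is
 * the canonical caller:
 *
 *	bp->b_flags |= B_READ;
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);	(sleeps until biodone() sets B_DONE,
 *				 then returns b_error, EIO if unset)
 */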
/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_offset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = (vm_object_t) vp->v_vmdata;
		if (!obj) {
			splx(s);
			return;
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			m = bp->b_pages[i];
			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (trunc_page(foff) != m->offset) {
				printf("biodone: foff(%d)/m->offset(%d) mismatch\n", foff, m->offset);
			}
#endif
			resid = (m->offset + PAGE_SIZE) - foff;
			if (resid > iosize)
				resid = iosize;
			if (resid > 0) {
				vm_page_set_valid(m, foff, resid);
				vm_page_set_clean(m, foff, resid);
			}
			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up. if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, off: %d, foff: %d, resid: %d, index: %d\n",
				    m->offset, foff, resid, i);
				printf(" iosize: %d, lblkno: %d\n",
				    bp->b_vp->v_mount->mnt_stat.f_iosize, bp->b_lblkno);
				printf(" valid: 0x%x, dirty: 0x%x, mapped: %d\n",
				    m->valid, m->dirty, m->bmapped);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			PAGE_WAKEUP(m);
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */
	if (bp->b_flags & B_ASYNC) {
		brelse(bp);
	} else {
		bp->b_flags &= ~(B_WANTED | B_PDWANTED);
		wakeup((caddr_t) bp);
	}
	splx(s);
}
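
/*
 * Usage sketch (illustrative, not part of the original file): instead
 * of sleeping in biowait(), an async consumer can ask biodone() to run
 * a completion function; "mydone" is hypothetical:
 *
 *	bp->b_flags |= B_CALL | B_ASYNC;
 *	bp->b_iodone = mydone;
 *	VOP_STRATEGY(bp);
 *
 * biodone() then calls mydone(bp), usually at interrupt time, and
 * returns immediately; the callback becomes responsible for releasing
 * the buffer, since the B_ASYNC brelse() path is skipped for B_CALL.
 */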
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = bufqueues[QUEUE_LOCKED].tqh_first;
	    bp != NULL;
	    bp = bp->b_freelist.tqe_next)
		count++;
	return (count);
}

int vfs_update_interval = 30;

void
vfs_update()
{
	(void) spl0();
	while (1) {
		tsleep((caddr_t) &vfs_update_wakeup, PRIBIO, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O. This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = (vm_object_t) vp->v_vmdata;
		vm_offset_t foff;

		foff = vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, foff);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			PAGE_WAKEUP(m);
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup((caddr_t) obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY. Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = (vm_object_t) bp->b_vp->v_vmdata;
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			obj->paging_in_progress++;
			m->busy++;
			if (clear_modify) {
				vm_page_test_dirty(m);
				vm_page_protect(m, VM_PROT_READ);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
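
/*
 * Usage sketch (illustrative, not part of the original file):
 * vfs_busy_pages() must precede the strategy call on any VMIO buffer,
 * and each busied page must be unbusied exactly once, either by
 * biodone() on completion or by vfs_unbusy_pages() on abort:
 *
 *	vfs_busy_pages(bp, 0);
 *	error = attempt_the_transfer(bp);	(hypothetical)
 *	if (error)
 *		vfs_unbusy_pages(bp);	(rebalances m->busy and
 *					 obj->paging_in_progress by hand)
 */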
/*
 * Tell the VM system that the pages associated with this buffer
 * are dirty. This is in case of the unlikely circumstance that
 * a buffer has to be destroyed before it is flushed.
 */
void
vfs_dirty_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_offset_t foff = bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		int iocount = bp->b_bufsize;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = (m->offset + PAGE_SIZE) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_valid(m, foff, resid);
				vm_page_set_dirty(m, foff, resid);
			}
			PAGE_WAKEUP(m);
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space. The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
tryagain:
		p = vm_page_alloc(kernel_object, pg - VM_MIN_KERNEL_ADDRESS,
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = p;
		PAGE_WAKEUP(p);
		bp->b_npages++;
	}
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t froma, vm_offset_t toa)
{
	vm_offset_t pg;
	vm_page_t p;
	vm_offset_t from = round_page(froma);
	vm_offset_t to = round_page(toa);

	for (pg = from; pg < to; pg += PAGE_SIZE) {
		p = bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE];
		bp->b_pages[((caddr_t) pg - bp->b_data) / PAGE_SIZE] = 0;
		pmap_kremove(pg);
		vm_page_free(p);
		--bp->b_npages;
	}
}

void
bufstats()
{
}
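
/*
 * Worked example (illustrative, not part of the original file): the
 * vm_hold_*_pages() pair above is how allocbuf() resizes non-VMIO
 * buffer memory. Growing a buffer from 4K to 8K with a 4K PAGE_SIZE
 * reduces to:
 *
 *	vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + 4096,
 *	    (vm_offset_t) bp->b_data + 8192);
 *	bufspace += 4096;
 *
 * which wires one fresh anonymous page, maps it at the buffer's kva
 * with pmap_kenter(), and records it in bp->b_pages[1].
 */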