/* vfs_bio.c revision 32724 */
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.144 1998/01/22 17:29:51 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author:  John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
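 * (The single bogus_page is temporarily substituted for pages that are
 * already fully valid before a read is started, so that the device I/O
 * cannot clobber their contents; brelse() and biodone() look the real
 * pages back up afterwards.)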
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers, lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_generation = 0;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
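 * As a rough worked example (assuming the 8K default block size described
 * above): with nbuf == 1024, maxbufspace is (1024 + 8) * 8K, a bit over
 * 8MB, and the maxbufmallocspace ceiling computed below comes to roughly
 * 400KB.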
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Reduce the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 8 + 20;
	lodirtybuffers = nbuf / 16 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;
	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
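 * A typical caller reads the current block and primes a single block of
 * read-ahead, along these lines (hypothetical usage sketch, not taken
 * from a real caller):
 *
 *	daddr_t rablkno = lblkno + 1;
 *	int rabsize = size;
 *
 *	error = breadn(vp, lblkno, size, &rablkno, &rabsize, 1, cred, &bp);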
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}

inline void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure that the filesystem needs is still in memory now, it
	 * is a good thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 *     layer can deal with ordered
	 *     writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
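	 * (For example, with DEV_BSIZE == 512 a page's valid bits can only
	 * say "this 512-byte piece is valid"; a 100-byte NFS write within a
	 * piece is representable in b_validoff/b_validend but not in the
	 * page bits alone.)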
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;

#if !defined(MAX_PERF)
		if (!vp)
			panic("brelse: missing vp");
#endif

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
#if !defined(MAX_PERF)
					if (!m) {
						panic("brelse: page missing\n");
					}
#endif
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;
		bp->b_generation++;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		bp->b_generation++;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
	    (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free, we cannot place
			 * pages onto the cache queue, so we don't modify
			 * any queues at all.  This is probably in error
			 * (for perf reasons), and we will eventually need
			 * to build a more complete infrastructure to
			 * support I/O rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if (m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					struct vnode *vp;
					vp = bp->b_vp;
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
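 * This is the unlocked lookup used internally; callers are expected to
 * already be at splbio (incore() below is the wrapper that takes splbio
 * around this).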
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
#if 0
	else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
	    ((size = bp->b_bufsize) >= PAGE_SIZE)) {
		maxcl = MAXPHYS / size;
		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if (bpa->b_blkno !=
				    bp->b_blkno + ((i * size) >> DEV_BSHIFT))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
#endif

	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
	int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp, *bp1;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
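	 * Recycling therefore prefers the AGE queue (stale or invalidated
	 * contents) and only then falls back to the LRU queue, whose buffers
	 * are more likely to be referenced again soon.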
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		/*
		 * If our delayed write is likely to be used soon, then
		 * recycle back onto the LRU queue.
		 */
		if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) &&
		    (bp->b_lblkno >= blkno) && (maxsize > 0)) {

			if (bp->b_usecount > 0) {
				if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) {

					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);

					if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
						TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
						bp->b_usecount--;
						goto start;
					}
					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				}
			}
		}

		/*
		 * Certain layered filesystems can recursively re-enter the vfs_bio
		 * code, due to delayed writes.  This helps keep the system from
		 * deadlocking.
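		 * writerecursion (below) flags the re-entry: on a nested call
		 * we do not issue yet another write, but instead hunt for a
		 * buffer that is not B_DELWRI and recycle that one.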
		 */
		if (writerecursion > 0) {
			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			while (bp) {
				if ((bp->b_flags & B_DELWRI) == 0)
					break;
				bp = TAILQ_NEXT(bp, b_freelist);
			}
			if (bp == NULL) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
			}
			if (bp == NULL)
				panic("getnewbuf: cannot get buffer, infinite recursion failure");
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	bp->b_generation++;

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 5;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

findkvaspace:
		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
			vm_map_min(buffer_map), maxsize, &addr)) {
			if (kvafreespace > 0) {
				int totfree = 0, freed;
				do {
					freed = 0;
					for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
					    bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) {
						if (bp1->b_kvasize != 0) {
							totfree += bp1->b_kvasize;
							freed = bp1->b_kvasize;
							bremfree(bp1);
							bfreekva(bp1);
							brelse(bp1);
							break;
						}
					}
				} while (freed);
				/*
				 * if we found free space, then retry with the same buffer.
				 */
				if (totfree)
					goto findkvaspace;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we have exceeded our allocated buffer space
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
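	 * (MAP_NOFAULT below marks the range as one the fault code is never
	 * expected to handle; the pages backing this kva are wired and
	 * entered explicitly by the buffer code itself.)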
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
			addr, addr + maxsize,
			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers < hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			vfs_bio_awrite(bp);
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
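	 * (That is, the object can remain flagged OBJ_WRITEABLE even after
	 * all of its pages have been write-protected, so the flag is only a
	 * conservative "may have been modified through a mapping" hint.)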
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;
	int generation;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
loop1:
		generation = bp->b_generation;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo)) {
				if (bp->b_generation != generation)
					goto loop;
				goto loop1;
			} else {
				splx(s);
				return (struct buf *) NULL;
			}
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			bp->b_generation++;
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(vp, blkno,
			slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
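		 * If another thread did create the buffer while we slept,
		 * throw away the one we just constituted and retry the
		 * lookup from the top.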
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
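			 * (The PAGE_SIZE/2 test below bounds the malloc
			 * case: anything larger is at least as cheap to
			 * back with whole pages.)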
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
	doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							vm_pageout_deficit += (desiredpages - bp->b_npages);
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		if (bp->b_flags & B_READ)
			tsleep(bp, PRIBIO, "biord", 0);
		else
			tsleep(bp, curproc->p_usrpri, "biowr", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
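 * (Any completion routine hung off b_iodone via B_CALL runs in this same
 * context and is subject to the same restriction.)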
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VOBJBUF) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
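			 * (vfs_busy_pages() is what should have raised
			 * m->busy before the I/O was started.)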
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
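 * It undoes the per-page bookkeeping that vfs_busy_pages() set up: the
 * page busy counts and the object's paging_in_progress count are dropped
 * and any waiters are woken.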
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
	vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
	vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
		(vm_offset_t) ((foff + off) & PAGE_MASK),
		size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
	    (vm_offset_t) (soff & PAGE_MASK),
	    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
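 * (Pages come straight from kernel_object, are wired, and are mapped with
 * pmap_kenter(); the kva range is assumed to lie within the buffer's
 * existing b_kvabase reservation.)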
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
			    VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */