vfs_bio.c revision 37384
1/* 2 * Copyright (c) 1994,1997 John S. Dyson 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice immediately at the beginning of the file, without modification, 10 * this list of conditions, and the following disclaimer. 11 * 2. Absolutely no warranty of function or purpose is made by the author 12 * John S. Dyson. 13 * 14 * $Id: vfs_bio.c,v 1.164 1998/05/01 15:10:59 peter Exp $ 15 */ 16 17/* 18 * this file contains a new buffer I/O scheme implementing a coherent 19 * VM object and buffer cache scheme. Pains have been taken to make 20 * sure that the performance degradation associated with schemes such 21 * as this is not realized. 22 * 23 * Author: John S. Dyson 24 * Significant help during the development and debugging phases 25 * had been provided by David Greenman, also of the FreeBSD core team. 26 */ 27 28#include "opt_bounce.h" 29 30#define VMIO 31#include <sys/param.h> 32#include <sys/systm.h> 33#include <sys/sysproto.h> 34#include <sys/kernel.h> 35#include <sys/sysctl.h> 36#include <sys/proc.h> 37#include <sys/vnode.h> 38#include <sys/vmmeter.h> 39#include <sys/lock.h> 40#include <miscfs/specfs/specdev.h> 41#include <vm/vm.h> 42#include <vm/vm_param.h> 43#include <vm/vm_prot.h> 44#include <vm/vm_kern.h> 45#include <vm/vm_pageout.h> 46#include <vm/vm_page.h> 47#include <vm/vm_object.h> 48#include <vm/vm_extern.h> 49#include <vm/vm_map.h> 50#include <sys/buf.h> 51#include <sys/mount.h> 52#include <sys/malloc.h> 53#include <sys/resourcevar.h> 54 55static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer"); 56 57struct bio_ops bioops; /* I/O operation notification */ 58 59#if 0 /* replaced by sched_sync */ 60static void vfs_update __P((void)); 61static struct proc *updateproc; 62static struct kproc_desc up_kp = { 63 "update", 64 vfs_update, 65 &updateproc 66}; 67SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp) 68#endif 69 70struct buf *buf; /* buffer header pool */ 71struct swqueue bswlist; 72 73static int count_lock_queue __P((void)); 74static void vm_hold_free_pages(struct buf * bp, vm_offset_t from, 75 vm_offset_t to); 76static void vm_hold_load_pages(struct buf * bp, vm_offset_t from, 77 vm_offset_t to); 78static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff, 79 vm_offset_t off, vm_offset_t size, 80 vm_page_t m); 81static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, 82 int pageno, vm_page_t m); 83static void vfs_clean_pages(struct buf * bp); 84static void vfs_setdirty(struct buf *bp); 85static void vfs_vmio_release(struct buf *bp); 86static void flushdirtybuffers(int slpflag, int slptimeo); 87 88int needsbuffer; 89 90/* 91 * Internal update daemon, process 3 92 * The variable vfs_update_wakeup allows for internal syncs. 93 */ 94int vfs_update_wakeup; 95 96 97/* 98 * buffers base kva 99 */ 100 101/* 102 * bogus page -- for I/O to/from partially complete buffers 103 * this is a temporary solution to the problem, but it is not 104 * really that bad. it would be better to split the buffer 105 * for input in the case of buffers partially already in memory, 106 * but the code is intricate enough already. 
107 */ 108vm_page_t bogus_page; 109static vm_offset_t bogus_offset; 110 111static int bufspace, maxbufspace, vmiospace, maxvmiobufspace, 112 bufmallocspace, maxbufmallocspace; 113int numdirtybuffers; 114static int lodirtybuffers, hidirtybuffers; 115static int numfreebuffers, lofreebuffers, hifreebuffers; 116static int kvafreespace; 117 118SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD, 119 &numdirtybuffers, 0, ""); 120SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW, 121 &lodirtybuffers, 0, ""); 122SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW, 123 &hidirtybuffers, 0, ""); 124SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD, 125 &numfreebuffers, 0, ""); 126SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW, 127 &lofreebuffers, 0, ""); 128SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW, 129 &hifreebuffers, 0, ""); 130SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW, 131 &maxbufspace, 0, ""); 132SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD, 133 &bufspace, 0, ""); 134SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW, 135 &maxvmiobufspace, 0, ""); 136SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD, 137 &vmiospace, 0, ""); 138SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW, 139 &maxbufmallocspace, 0, ""); 140SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD, 141 &bufmallocspace, 0, ""); 142SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD, 143 &kvafreespace, 0, ""); 144 145static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash; 146struct bqueues bufqueues[BUFFER_QUEUES] = {0}; 147 148extern int vm_swap_size; 149 150#define BUF_MAXUSE 24 151 152#define VFS_BIO_NEED_ANY 1 153#define VFS_BIO_NEED_LOWLIMIT 2 154#define VFS_BIO_NEED_FREE 4 155 156/* 157 * Initialize buffer headers and related structures. 158 */ 159void 160bufinit() 161{ 162 struct buf *bp; 163 int i; 164 165 TAILQ_INIT(&bswlist); 166 LIST_INIT(&invalhash); 167 168 /* first, make a null hash table */ 169 for (i = 0; i < BUFHSZ; i++) 170 LIST_INIT(&bufhashtbl[i]); 171 172 /* next, make a null set of free lists */ 173 for (i = 0; i < BUFFER_QUEUES; i++) 174 TAILQ_INIT(&bufqueues[i]); 175 176 /* finally, initialize each buffer header and stick on empty q */ 177 for (i = 0; i < nbuf; i++) { 178 bp = &buf[i]; 179 bzero(bp, sizeof *bp); 180 bp->b_flags = B_INVAL; /* we're just an empty header */ 181 bp->b_dev = NODEV; 182 bp->b_rcred = NOCRED; 183 bp->b_wcred = NOCRED; 184 bp->b_qindex = QUEUE_EMPTY; 185 bp->b_vnbufs.le_next = NOLIST; 186 LIST_INIT(&bp->b_dep); 187 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 188 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 189 } 190/* 191 * maxbufspace is currently calculated to support all filesystem blocks 192 * to be 8K. If you happen to use a 16K filesystem, the size of the buffer 193 * cache is still the same as it would be for 8K filesystems. This 194 * keeps the size of the buffer cache "in check" for big block filesystems. 195 */ 196 maxbufspace = (nbuf + 8) * DFLTBSIZE; 197/* 198 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed 199 */ 200 maxvmiobufspace = 2 * maxbufspace / 3; 201/* 202 * Limit the amount of malloc memory since it is wired permanently into 203 * the kernel space. Even though this is accounted for in the buffer 204 * allocation, we don't want the malloced region to grow uncontrolled. 205 * The malloc scheme improves memory utilization significantly on average 206 * (small) directories. 
207 */ 208 maxbufmallocspace = maxbufspace / 20; 209 210/* 211 * Remove the probability of deadlock conditions by limiting the 212 * number of dirty buffers. 213 */ 214 hidirtybuffers = nbuf / 8 + 20; 215 lodirtybuffers = nbuf / 16 + 10; 216 numdirtybuffers = 0; 217 lofreebuffers = nbuf / 18 + 5; 218 hifreebuffers = 2 * lofreebuffers; 219 numfreebuffers = nbuf; 220 kvafreespace = 0; 221 222 bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE); 223 bogus_page = vm_page_alloc(kernel_object, 224 ((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT), 225 VM_ALLOC_NORMAL); 226 227} 228 229/* 230 * Free the kva allocation for a buffer 231 * Must be called only at splbio or higher, 232 * as this is the only locking for buffer_map. 233 */ 234static void 235bfreekva(struct buf * bp) 236{ 237 if (bp->b_kvasize == 0) 238 return; 239 240 vm_map_delete(buffer_map, 241 (vm_offset_t) bp->b_kvabase, 242 (vm_offset_t) bp->b_kvabase + bp->b_kvasize); 243 244 bp->b_kvasize = 0; 245 246} 247 248/* 249 * remove the buffer from the appropriate free list 250 */ 251void 252bremfree(struct buf * bp) 253{ 254 int s = splbio(); 255 256 if (bp->b_qindex != QUEUE_NONE) { 257 if (bp->b_qindex == QUEUE_EMPTY) { 258 kvafreespace -= bp->b_kvasize; 259 } 260 TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist); 261 bp->b_qindex = QUEUE_NONE; 262 } else { 263#if !defined(MAX_PERF) 264 panic("bremfree: removing a buffer when not on a queue"); 265#endif 266 } 267 if ((bp->b_flags & B_INVAL) || 268 (bp->b_flags & (B_DELWRI|B_LOCKED)) == 0) 269 --numfreebuffers; 270 splx(s); 271} 272 273 274/* 275 * Get a buffer with the specified data. Look in the cache first. 276 */ 277int 278bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred, 279 struct buf ** bpp) 280{ 281 struct buf *bp; 282 283 bp = getblk(vp, blkno, size, 0, 0); 284 *bpp = bp; 285 286 /* if not found in cache, do some I/O */ 287 if ((bp->b_flags & B_CACHE) == 0) { 288 if (curproc != NULL) 289 curproc->p_stats->p_ru.ru_inblock++; 290 bp->b_flags |= B_READ; 291 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 292 if (bp->b_rcred == NOCRED) { 293 if (cred != NOCRED) 294 crhold(cred); 295 bp->b_rcred = cred; 296 } 297 vfs_busy_pages(bp, 0); 298 VOP_STRATEGY(vp, bp); 299 return (biowait(bp)); 300 } 301 return (0); 302} 303 304/* 305 * Operates like bread, but also starts asynchronous I/O on 306 * read-ahead blocks. 
307 */ 308int 309breadn(struct vnode * vp, daddr_t blkno, int size, 310 daddr_t * rablkno, int *rabsize, 311 int cnt, struct ucred * cred, struct buf ** bpp) 312{ 313 struct buf *bp, *rabp; 314 int i; 315 int rv = 0, readwait = 0; 316 317 *bpp = bp = getblk(vp, blkno, size, 0, 0); 318 319 /* if not found in cache, do some I/O */ 320 if ((bp->b_flags & B_CACHE) == 0) { 321 if (curproc != NULL) 322 curproc->p_stats->p_ru.ru_inblock++; 323 bp->b_flags |= B_READ; 324 bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 325 if (bp->b_rcred == NOCRED) { 326 if (cred != NOCRED) 327 crhold(cred); 328 bp->b_rcred = cred; 329 } 330 vfs_busy_pages(bp, 0); 331 VOP_STRATEGY(vp, bp); 332 ++readwait; 333 } 334 for (i = 0; i < cnt; i++, rablkno++, rabsize++) { 335 if (inmem(vp, *rablkno)) 336 continue; 337 rabp = getblk(vp, *rablkno, *rabsize, 0, 0); 338 339 if ((rabp->b_flags & B_CACHE) == 0) { 340 if (curproc != NULL) 341 curproc->p_stats->p_ru.ru_inblock++; 342 rabp->b_flags |= B_READ | B_ASYNC; 343 rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL); 344 if (rabp->b_rcred == NOCRED) { 345 if (cred != NOCRED) 346 crhold(cred); 347 rabp->b_rcred = cred; 348 } 349 vfs_busy_pages(rabp, 0); 350 VOP_STRATEGY(vp, rabp); 351 } else { 352 brelse(rabp); 353 } 354 } 355 356 if (readwait) { 357 rv = biowait(bp); 358 } 359 return (rv); 360} 361 362/* 363 * Write, release buffer on completion. (Done by iodone 364 * if async.) 365 */ 366int 367bwrite(struct buf * bp) 368{ 369 int oldflags; 370 struct vnode *vp; 371 struct mount *mp; 372 373 374 if (bp->b_flags & B_INVAL) { 375 brelse(bp); 376 return (0); 377 } 378 379 oldflags = bp->b_flags; 380 381#if !defined(MAX_PERF) 382 if ((bp->b_flags & B_BUSY) == 0) 383 panic("bwrite: buffer is not busy???"); 384#endif 385 386 bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI); 387 bp->b_flags |= B_WRITEINPROG; 388 389 if ((oldflags & B_DELWRI) == B_DELWRI) { 390 --numdirtybuffers; 391 reassignbuf(bp, bp->b_vp); 392 } 393 394 bp->b_vp->v_numoutput++; 395 vfs_busy_pages(bp, 1); 396 if (curproc != NULL) 397 curproc->p_stats->p_ru.ru_oublock++; 398 VOP_STRATEGY(bp->b_vp, bp); 399 400 /* 401 * Collect statistics on synchronous and asynchronous writes. 402 * Writes to block devices are charged to their associated 403 * filesystem (if any). 404 */ 405 if ((vp = bp->b_vp) != NULL) { 406 if (vp->v_type == VBLK) 407 mp = vp->v_specmountpoint; 408 else 409 mp = vp->v_mount; 410 if (mp != NULL) 411 if ((oldflags & B_ASYNC) == 0) 412 mp->mnt_stat.f_syncwrites++; 413 else 414 mp->mnt_stat.f_asyncwrites++; 415 } 416 417 if ((oldflags & B_ASYNC) == 0) { 418 int rtval = biowait(bp); 419 brelse(bp); 420 return (rtval); 421 } 422 return (0); 423} 424 425__inline void 426vfs_bio_need_satisfy(void) { 427 ++numfreebuffers; 428 if (!needsbuffer) 429 return; 430 if (numdirtybuffers < lodirtybuffers) { 431 needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT); 432 } else { 433 needsbuffer &= ~VFS_BIO_NEED_ANY; 434 } 435 if (numfreebuffers >= hifreebuffers) { 436 needsbuffer &= ~VFS_BIO_NEED_FREE; 437 } 438 wakeup(&needsbuffer); 439} 440 441/* 442 * Delayed write. (Buffer is marked dirty). 
443 */ 444void 445bdwrite(struct buf * bp) 446{ 447 int s; 448 struct vnode *vp; 449 450#if !defined(MAX_PERF) 451 if ((bp->b_flags & B_BUSY) == 0) { 452 panic("bdwrite: buffer is not busy"); 453 } 454#endif 455 456 if (bp->b_flags & B_INVAL) { 457 brelse(bp); 458 return; 459 } 460 if (bp->b_flags & B_TAPE) { 461 bawrite(bp); 462 return; 463 } 464 bp->b_flags &= ~(B_READ|B_RELBUF); 465 if ((bp->b_flags & B_DELWRI) == 0) { 466 bp->b_flags |= B_DONE | B_DELWRI; 467 reassignbuf(bp, bp->b_vp); 468 ++numdirtybuffers; 469 } 470 471 /* 472 * This bmap keeps the system from needing to do the bmap later, 473 * perhaps when the system is attempting to do a sync. Since it 474 * is likely that the indirect block -- or whatever other datastructure 475 * that the filesystem needs is still in memory now, it is a good 476 * thing to do this. Note also, that if the pageout daemon is 477 * requesting a sync -- there might not be enough memory to do 478 * the bmap then... So, this is important to do. 479 */ 480 if (bp->b_lblkno == bp->b_blkno) { 481 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL); 482 } 483 484 /* 485 * Set the *dirty* buffer range based upon the VM system dirty pages. 486 */ 487 vfs_setdirty(bp); 488 489 /* 490 * We need to do this here to satisfy the vnode_pager and the 491 * pageout daemon, so that it thinks that the pages have been 492 * "cleaned". Note that since the pages are in a delayed write 493 * buffer -- the VFS layer "will" see that the pages get written 494 * out on the next sync, or perhaps the cluster will be completed. 495 */ 496 vfs_clean_pages(bp); 497 bqrelse(bp); 498 499 /* 500 * XXX The soft dependency code is not prepared to 501 * have I/O done when a bdwrite is requested. For 502 * now we just let the write be delayed if it is 503 * requested by the soft dependency code. 504 */ 505 if ((vp = bp->b_vp) && 506 (vp->v_type == VBLK && vp->v_specmountpoint && 507 (vp->v_specmountpoint->mnt_flag & MNT_SOFTDEP)) || 508 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP))) 509 return; 510 511 if (numdirtybuffers >= hidirtybuffers) 512 flushdirtybuffers(0, 0); 513 514 return; 515} 516 517 518/* 519 * Same as first half of bdwrite, mark buffer dirty, but do not release it. 520 * Check how this compares with vfs_setdirty(); XXX [JRE] 521 */ 522void 523bdirty(bp) 524 struct buf *bp; 525{ 526 int s; 527 528 bp->b_flags &= ~(B_READ|B_RELBUF); /* XXX ??? check this */ 529 if ((bp->b_flags & B_DELWRI) == 0) { 530 bp->b_flags |= B_DONE | B_DELWRI; /* why done? XXX JRE */ 531 reassignbuf(bp, bp->b_vp); 532 ++numdirtybuffers; 533 } 534} 535 536/* 537 * Asynchronous write. 538 * Start output on a buffer, but do not wait for it to complete. 539 * The buffer is released when the output completes. 540 */ 541void 542bawrite(struct buf * bp) 543{ 544 bp->b_flags |= B_ASYNC; 545 (void) VOP_BWRITE(bp); 546} 547 548/* 549 * Ordered write. 550 * Start output on a buffer, but only wait for it to complete if the 551 * output device cannot guarantee ordering in some other way. Devices 552 * that can perform asynchronous ordered writes will set the B_ASYNC 553 * flag in their strategy routine. 554 * The buffer is released when the output completes. 555 */ 556int 557bowrite(struct buf * bp) 558{ 559 /* 560 * XXX Add in B_ASYNC once the SCSI 561 * layer can deal with ordered 562 * writes properly. 563 */ 564 bp->b_flags |= B_ORDERED; 565 return (VOP_BWRITE(bp)); 566} 567 568/* 569 * Release a buffer. 
570 */ 571void 572brelse(struct buf * bp) 573{ 574 int s; 575 576 if (bp->b_flags & B_CLUSTER) { 577 relpbuf(bp); 578 return; 579 } 580 581 s = splbio(); 582 583 /* anyone need this block? */ 584 if (bp->b_flags & B_WANTED) { 585 bp->b_flags &= ~(B_WANTED | B_AGE); 586 wakeup(bp); 587 } 588 589 if (bp->b_flags & B_LOCKED) 590 bp->b_flags &= ~B_ERROR; 591 592 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) || 593 (bp->b_bufsize <= 0)) { 594 bp->b_flags |= B_INVAL; 595 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate) 596 (*bioops.io_deallocate)(bp); 597 if (bp->b_flags & B_DELWRI) 598 --numdirtybuffers; 599 bp->b_flags &= ~(B_DELWRI | B_CACHE); 600 if ((bp->b_flags & B_VMIO) == 0) { 601 if (bp->b_bufsize) 602 allocbuf(bp, 0); 603 if (bp->b_vp) 604 brelvp(bp); 605 } 606 } 607 608 /* 609 * VMIO buffer rundown. It is not very necessary to keep a VMIO buffer 610 * constituted, so the B_INVAL flag is used to *invalidate* the buffer, 611 * but the VM object is kept around. The B_NOCACHE flag is used to 612 * invalidate the pages in the VM object. 613 * 614 * If the buffer is a partially filled NFS buffer, keep it 615 * since invalidating it now will lose information. The valid 616 * flags in the vm_pages have only DEV_BSIZE resolution but 617 * the b_validoff, b_validend fields have byte resolution. 618 * This can avoid unnecessary re-reads of the buffer. 619 * XXX this seems to cause performance problems. 620 */ 621 if ((bp->b_flags & B_VMIO) 622 && !(bp->b_vp->v_tag == VT_NFS && 623 bp->b_vp->v_type != VBLK && 624 (bp->b_flags & B_DELWRI) != 0) 625#ifdef notdef 626 && (bp->b_vp->v_tag != VT_NFS 627 || bp->b_vp->v_type == VBLK 628 || (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) 629 || bp->b_validend == 0 630 || (bp->b_validoff == 0 631 && bp->b_validend == bp->b_bufsize)) 632#endif 633 ) { 634 635 int i, j, resid; 636 vm_page_t m; 637 off_t foff; 638 vm_pindex_t poff; 639 vm_object_t obj; 640 struct vnode *vp; 641 642 vp = bp->b_vp; 643 644 resid = bp->b_bufsize; 645 foff = bp->b_offset; 646 647 for (i = 0; i < bp->b_npages; i++) { 648 m = bp->b_pages[i]; 649 m->flags &= ~PG_ZERO; 650 if (m == bogus_page) { 651 652 obj = (vm_object_t) vp->v_object; 653 poff = OFF_TO_IDX(bp->b_offset); 654 655 for (j = i; j < bp->b_npages; j++) { 656 m = bp->b_pages[j]; 657 if (m == bogus_page) { 658 m = vm_page_lookup(obj, poff + j); 659#if !defined(MAX_PERF) 660 if (!m) { 661 panic("brelse: page missing\n"); 662 } 663#endif 664 bp->b_pages[j] = m; 665 } 666 } 667 668 if ((bp->b_flags & B_INVAL) == 0) { 669 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 670 } 671 } 672 if (bp->b_flags & (B_NOCACHE|B_ERROR)) { 673 int poffset = foff & PAGE_MASK; 674 int presid = resid > (PAGE_SIZE - poffset) ? 675 (PAGE_SIZE - poffset) : resid; 676 vm_page_set_invalid(m, poffset, presid); 677 } 678 resid -= PAGE_SIZE; 679 } 680 681 if (bp->b_flags & (B_INVAL | B_RELBUF)) 682 vfs_vmio_release(bp); 683 684 } else if (bp->b_flags & B_VMIO) { 685 686 if (bp->b_flags & (B_INVAL | B_RELBUF)) 687 vfs_vmio_release(bp); 688 689 } 690 691#if !defined(MAX_PERF) 692 if (bp->b_qindex != QUEUE_NONE) 693 panic("brelse: free buffer onto another queue???"); 694#endif 695 696 /* enqueue */ 697 /* buffers with no memory */ 698 if (bp->b_bufsize == 0) { 699 bp->b_flags |= B_INVAL; 700 bp->b_qindex = QUEUE_EMPTY; 701 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist); 702 LIST_REMOVE(bp, b_hash); 703 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 704 bp->b_dev = NODEV; 705 kvafreespace += bp->b_kvasize; 706 707 /* buffers with junk contents */ 708 } else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) { 709 bp->b_flags |= B_INVAL; 710 bp->b_qindex = QUEUE_AGE; 711 TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist); 712 LIST_REMOVE(bp, b_hash); 713 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 714 bp->b_dev = NODEV; 715 716 /* buffers that are locked */ 717 } else if (bp->b_flags & B_LOCKED) { 718 bp->b_qindex = QUEUE_LOCKED; 719 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 720 721 /* buffers with stale but valid contents */ 722 } else if (bp->b_flags & B_AGE) { 723 bp->b_qindex = QUEUE_AGE; 724 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist); 725 726 /* buffers with valid and quite potentially reusable contents */ 727 } else { 728 bp->b_qindex = QUEUE_LRU; 729 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 730 } 731 732 if ((bp->b_flags & B_INVAL) || 733 (bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) { 734 if (bp->b_flags & B_DELWRI) { 735 --numdirtybuffers; 736 bp->b_flags &= ~B_DELWRI; 737 } 738 vfs_bio_need_satisfy(); 739 } 740 741 /* unlock */ 742 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY | 743 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 744 splx(s); 745} 746
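/*
 * Editor's note -- illustrative sketch, not part of this revision:
 * a typical consumer gets a busy buffer from bread() and must hand it
 * back through one of the release paths here.  brelse() above is the
 * fully general release (and the usual error path, since it honors
 * B_ERROR/B_INVAL), while bqrelse() below is the cheap release for a
 * buffer whose contents remain valid.  "vp", "lbn" and "fsize" are
 * placeholders for a vnode, logical block number and block size.
 */
#if 0
	struct buf *bp;
	int error;

	error = bread(vp, lbn, fsize, NOCRED, &bp);
	if (error) {
		brelse(bp);		/* toss the failed buffer */
		return (error);
	}
	/* ... inspect bp->b_data ... */
	bqrelse(bp);			/* keep contents cached on the LRU queue */
#endif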
747/* 748 * Release a buffer. 749 */ 750void 751bqrelse(struct buf * bp) 752{ 753 int s; 754 755 s = splbio(); 756 757 /* anyone need this block? */ 758 if (bp->b_flags & B_WANTED) { 759 bp->b_flags &= ~(B_WANTED | B_AGE); 760 wakeup(bp); 761 } 762 763#if !defined(MAX_PERF) 764 if (bp->b_qindex != QUEUE_NONE) 765 panic("bqrelse: free buffer onto another queue???"); 766#endif 767 768 if (bp->b_flags & B_LOCKED) { 769 bp->b_flags &= ~B_ERROR; 770 bp->b_qindex = QUEUE_LOCKED; 771 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist); 772 /* buffers with stale but valid contents */ 773 } else { 774 bp->b_qindex = QUEUE_LRU; 775 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 776 } 777 778 if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) { 779 vfs_bio_need_satisfy(); 780 } 781 782 /* unlock */ 783 bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY | 784 B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF); 785 splx(s); 786} 787 788static void 789vfs_vmio_release(bp) 790 struct buf *bp; 791{ 792 int i; 793 vm_page_t m; 794 795 for (i = 0; i < bp->b_npages; i++) { 796 m = bp->b_pages[i]; 797 bp->b_pages[i] = NULL; 798 vm_page_unwire(m); 799 800 /* 801 * We don't mess with busy pages, it is 802 * the responsibility of the process that 803 * busied the pages to deal with them. 804 */ 805 if ((m->flags & PG_BUSY) || (m->busy != 0)) 806 continue; 807 808 if (m->wire_count == 0) { 809 810 /* 811 * If this is an async free -- we cannot place 812 * pages onto the cache queue. 
If it is an 813 * async free, then we don't modify any queues. 814 * This is probably in error (for perf reasons), 815 * and we will eventually need to build 816 * a more complete infrastructure to support I/O 817 * rundown. 818 */ 819 if ((bp->b_flags & B_ASYNC) == 0) { 820 821 /* 822 * In the case of sync buffer frees, we can do pretty much 823 * anything to any of the memory queues. Specifically, 824 * the cache queue is okay to be modified. 825 */ 826 if (m->valid) { 827 if(m->dirty == 0) 828 vm_page_test_dirty(m); 829 /* 830 * this keeps pressure off of the process memory 831 */ 832 if (m->dirty == 0 && m->hold_count == 0) 833 vm_page_cache(m); 834 else 835 vm_page_deactivate(m); 836 m->flags &= ~PG_ZERO; 837 } else if (m->hold_count == 0) { 838 m->flags |= PG_BUSY; 839 vm_page_protect(m, VM_PROT_NONE); 840 vm_page_free(m); 841 } 842 } else { 843 /* 844 * If async, then at least we clear the 845 * act_count. 846 */ 847 m->act_count = 0; 848 m->flags &= ~PG_ZERO; 849 } 850 } 851 } 852 bufspace -= bp->b_bufsize; 853 vmiospace -= bp->b_bufsize; 854 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 855 bp->b_npages = 0; 856 bp->b_bufsize = 0; 857 bp->b_flags &= ~B_VMIO; 858 if (bp->b_vp) 859 brelvp(bp); 860} 861 862/* 863 * Check to see if a block is currently memory resident. 864 */ 865struct buf * 866gbincore(struct vnode * vp, daddr_t blkno) 867{ 868 struct buf *bp; 869 struct bufhashhdr *bh; 870 871 bh = BUFHASH(vp, blkno); 872 bp = bh->lh_first; 873 874 /* Search hash chain */ 875 while (bp != NULL) { 876 /* hit */ 877 if (bp->b_vp == vp && bp->b_lblkno == blkno && 878 (bp->b_flags & B_INVAL) == 0) { 879 break; 880 } 881 bp = bp->b_hash.le_next; 882 } 883 return (bp); 884} 885 886/* 887 * this routine implements clustered async writes for 888 * clearing out B_DELWRI buffers... This is much better 889 * than the old way of writing only one buffer at a time. 890 */ 891int 892vfs_bio_awrite(struct buf * bp) 893{ 894 int i; 895 daddr_t lblkno = bp->b_lblkno; 896 struct vnode *vp = bp->b_vp; 897 int s; 898 int ncl; 899 struct buf *bpa; 900 int nwritten; 901 int size; 902 int maxcl; 903 904 s = splbio(); 905 /* 906 * right now we support clustered writing only to regular files 907 */ 908 if ((vp->v_type == VREG) && 909 (vp->v_mount != 0) && /* Only on nodes that have the size info */ 910 (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) { 911 912 size = vp->v_mount->mnt_stat.f_iosize; 913 maxcl = MAXPHYS / size; 914 915 for (i = 1; i < maxcl; i++) { 916 if ((bpa = gbincore(vp, lblkno + i)) && 917 ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) == 918 (B_DELWRI | B_CLUSTEROK)) && 919 (bpa->b_bufsize == size)) { 920 if ((bpa->b_blkno == bpa->b_lblkno) || 921 (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT))) 922 break; 923 } else { 924 break; 925 } 926 } 927 ncl = i; 928 /* 929 * this is a possible cluster write 930 */ 931 if (ncl != 1) { 932 nwritten = cluster_wbuild(vp, size, lblkno, ncl); 933 splx(s); 934 return nwritten; 935 } 936 } 937 938 bremfree(bp); 939 bp->b_flags |= B_BUSY | B_ASYNC; 940 941 splx(s); 942 /* 943 * default (old) behavior, writing out only one block 944 */ 945 nwritten = bp->b_bufsize; 946 (void) VOP_BWRITE(bp); 947 return nwritten; 948} 949 950 951/* 952 * Find a buffer header which is available for use. 
953 */ 954static struct buf * 955getnewbuf(struct vnode *vp, daddr_t blkno, 956 int slpflag, int slptimeo, int size, int maxsize) 957{ 958 struct buf *bp, *bp1; 959 int nbyteswritten = 0; 960 vm_offset_t addr; 961 static int writerecursion = 0; 962 963start: 964 if (bufspace >= maxbufspace) 965 goto trytofreespace; 966 967 /* can we constitute a new buffer? */ 968 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) { 969#if !defined(MAX_PERF) 970 if (bp->b_qindex != QUEUE_EMPTY) 971 panic("getnewbuf: inconsistent EMPTY queue, qindex=%d", 972 bp->b_qindex); 973#endif 974 bp->b_flags |= B_BUSY; 975 bremfree(bp); 976 goto fillbuf; 977 } 978trytofreespace: 979 /* 980 * We keep the file I/O from hogging metadata I/O 981 * This is desirable because file data is cached in the 982 * VM/Buffer cache even if a buffer is freed. 983 */ 984 if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) { 985#if !defined(MAX_PERF) 986 if (bp->b_qindex != QUEUE_AGE) 987 panic("getnewbuf: inconsistent AGE queue, qindex=%d", 988 bp->b_qindex); 989#endif 990 } else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) { 991#if !defined(MAX_PERF) 992 if (bp->b_qindex != QUEUE_LRU) 993 panic("getnewbuf: inconsistent LRU queue, qindex=%d", 994 bp->b_qindex); 995#endif 996 } 997 if (!bp) { 998 /* wait for a free buffer of any kind */ 999 needsbuffer |= VFS_BIO_NEED_ANY; 1000 do 1001 tsleep(&needsbuffer, (PRIBIO + 4) | slpflag, "newbuf", 1002 slptimeo); 1003 while (needsbuffer & VFS_BIO_NEED_ANY); 1004 return (0); 1005 } 1006 1007#if defined(DIAGNOSTIC) 1008 if (bp->b_flags & B_BUSY) { 1009 panic("getnewbuf: busy buffer on free list\n"); 1010 } 1011#endif 1012 1013 /* 1014 * We are fairly aggressive about freeing VMIO buffers, but since 1015 * the buffering is intact without buffer headers, there is not 1016 * much loss. We gain by maintaining non-VMIOed metadata in buffers. 1017 */ 1018 if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) { 1019 if ((bp->b_flags & B_VMIO) == 0 || 1020 (vmiospace < maxvmiobufspace)) { 1021 --bp->b_usecount; 1022 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 1023 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 1024 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1025 goto start; 1026 } 1027 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1028 } 1029 } 1030 1031 1032 /* if we are a delayed write, convert to an async write */ 1033 if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) { 1034 1035 /* 1036 * If our delayed write is likely to be used soon, then 1037 * recycle back onto the LRU queue. 1038 */ 1039 if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) && 1040 (bp->b_lblkno >= blkno) && (maxsize > 0)) { 1041 1042 if (bp->b_usecount > 0) { 1043 if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) { 1044 1045 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist); 1046 1047 if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) { 1048 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1049 bp->b_usecount--; 1050 goto start; 1051 } 1052 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist); 1053 } 1054 } 1055 } 1056 1057 /* 1058 * Certain layered filesystems can recursively re-enter the vfs_bio 1059 * code, due to delayed writes. This helps keep the system from 1060 * deadlocking. 
1061 */ 1062 if (writerecursion > 0) { 1063 if (writerecursion > 5) { 1064 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1065 while (bp) { 1066 if ((bp->b_flags & B_DELWRI) == 0) 1067 break; 1068 bp = TAILQ_NEXT(bp, b_freelist); 1069 } 1070 if (bp == NULL) { 1071 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1072 while (bp) { 1073 if ((bp->b_flags & B_DELWRI) == 0) 1074 break; 1075 bp = TAILQ_NEXT(bp, b_freelist); 1076 } 1077 } 1078 if (bp == NULL) 1079 panic("getnewbuf: cannot get buffer, infinite recursion failure"); 1080 } else { 1081 bremfree(bp); 1082 bp->b_flags |= B_BUSY | B_AGE | B_ASYNC; 1083 nbyteswritten += bp->b_bufsize; 1084 ++writerecursion; 1085 VOP_BWRITE(bp); 1086 --writerecursion; 1087 if (!slpflag && !slptimeo) { 1088 return (0); 1089 } 1090 goto start; 1091 } 1092 } else { 1093 ++writerecursion; 1094 nbyteswritten += vfs_bio_awrite(bp); 1095 --writerecursion; 1096 if (!slpflag && !slptimeo) { 1097 return (0); 1098 } 1099 goto start; 1100 } 1101 } 1102 1103 if (bp->b_flags & B_WANTED) { 1104 bp->b_flags &= ~B_WANTED; 1105 wakeup(bp); 1106 } 1107 bremfree(bp); 1108 bp->b_flags |= B_BUSY; 1109 1110 if (bp->b_flags & B_VMIO) { 1111 bp->b_flags &= ~B_ASYNC; 1112 vfs_vmio_release(bp); 1113 } 1114 1115 if (bp->b_vp) 1116 brelvp(bp); 1117 1118fillbuf: 1119 1120 /* we are not free, nor do we contain interesting data */ 1121 if (bp->b_rcred != NOCRED) { 1122 crfree(bp->b_rcred); 1123 bp->b_rcred = NOCRED; 1124 } 1125 if (bp->b_wcred != NOCRED) { 1126 crfree(bp->b_wcred); 1127 bp->b_wcred = NOCRED; 1128 } 1129 if (LIST_FIRST(&bp->b_dep) != NULL && 1130 bioops.io_deallocate) 1131 (*bioops.io_deallocate)(bp); 1132 1133 LIST_REMOVE(bp, b_hash); 1134 LIST_INSERT_HEAD(&invalhash, bp, b_hash); 1135 if (bp->b_bufsize) { 1136 allocbuf(bp, 0); 1137 } 1138 bp->b_flags = B_BUSY; 1139 bp->b_dev = NODEV; 1140 bp->b_vp = NULL; 1141 bp->b_blkno = bp->b_lblkno = 0; 1142 bp->b_offset = NOOFFSET; 1143 bp->b_iodone = 0; 1144 bp->b_error = 0; 1145 bp->b_resid = 0; 1146 bp->b_bcount = 0; 1147 bp->b_npages = 0; 1148 bp->b_dirtyoff = bp->b_dirtyend = 0; 1149 bp->b_validoff = bp->b_validend = 0; 1150 bp->b_usecount = 5; 1151 /* Here, not kern_physio.c, is where this should be done*/ 1152 LIST_INIT(&bp->b_dep); 1153 1154 maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK; 1155 1156 /* 1157 * we assume that buffer_map is not at address 0 1158 */ 1159 addr = 0; 1160 if (maxsize != bp->b_kvasize) { 1161 bfreekva(bp); 1162 1163findkvaspace: 1164 /* 1165 * See if we have buffer kva space 1166 */ 1167 if (vm_map_findspace(buffer_map, 1168 vm_map_min(buffer_map), maxsize, &addr)) { 1169 if (kvafreespace > 0) { 1170 int totfree = 0, freed; 1171 do { 1172 freed = 0; 1173 for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]); 1174 bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) { 1175 if (bp1->b_kvasize != 0) { 1176 totfree += bp1->b_kvasize; 1177 freed = bp1->b_kvasize; 1178 bremfree(bp1); 1179 bfreekva(bp1); 1180 brelse(bp1); 1181 break; 1182 } 1183 } 1184 } while (freed); 1185 /* 1186 * if we found free space, then retry with the same buffer. 1187 */ 1188 if (totfree) 1189 goto findkvaspace; 1190 } 1191 bp->b_flags |= B_INVAL; 1192 brelse(bp); 1193 goto trytofreespace; 1194 } 1195 } 1196 1197 /* 1198 * See if we are below our allocated minimum 1199 */ 1200 if (bufspace >= (maxbufspace + nbyteswritten)) { 1201 bp->b_flags |= B_INVAL; 1202 brelse(bp); 1203 goto trytofreespace; 1204 } 1205 1206 /* 1207 * create a map entry for the buffer -- in essence 1208 * reserving the kva space. 1209 */ 1210 if (addr) { 1211 vm_map_insert(buffer_map, NULL, 0, 1212 addr, addr + maxsize, 1213 VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 1214 1215 bp->b_kvabase = (caddr_t) addr; 1216 bp->b_kvasize = maxsize; 1217 } 1218 bp->b_data = bp->b_kvabase; 1219 1220 return (bp); 1221} 1222
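/*
 * Worked example (editor's illustration): the kva reservation above is
 * always made in whole pages.  With PAGE_SIZE = 4096, a maxsize request
 * of 6144 bytes is rounded by "(maxsize + PAGE_MASK) & ~PAGE_MASK" to
 * (6144 + 4095) & ~4095 = 8192, so a later allocbuf() of up to two
 * pages can reuse the same map entry without another bfreekva() pass.
 */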
1223static void 1224waitfreebuffers(int slpflag, int slptimeo) { 1225 while (numfreebuffers < hifreebuffers) { 1226 flushdirtybuffers(slpflag, slptimeo); 1227 if (numfreebuffers < hifreebuffers) 1228 break; 1229 needsbuffer |= VFS_BIO_NEED_FREE; 1230 if (tsleep(&needsbuffer, (PRIBIO + 4)|slpflag, "biofre", slptimeo)) 1231 break; 1232 } 1233} 1234 1235static void 1236flushdirtybuffers(int slpflag, int slptimeo) { 1237 int s; 1238 static pid_t flushing = 0; 1239 1240 s = splbio(); 1241 1242 if (flushing) { 1243 if (flushing == curproc->p_pid) { 1244 splx(s); 1245 return; 1246 } 1247 while (flushing) { 1248 if (tsleep(&flushing, (PRIBIO + 4)|slpflag, "biofls", slptimeo)) { 1249 splx(s); 1250 return; 1251 } 1252 } 1253 } 1254 flushing = curproc->p_pid; 1255 1256 while (numdirtybuffers > lodirtybuffers) { 1257 struct buf *bp; 1258 needsbuffer |= VFS_BIO_NEED_LOWLIMIT; 1259 bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]); 1260 if (bp == NULL) 1261 bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]); 1262 1263 while (bp && ((bp->b_flags & B_DELWRI) == 0)) { 1264 bp = TAILQ_NEXT(bp, b_freelist); 1265 } 1266 1267 if (bp) { 1268 vfs_bio_awrite(bp); 1269 continue; 1270 } 1271 break; 1272 } 1273 1274 flushing = 0; 1275 wakeup(&flushing); 1276 splx(s); 1277} 1278 1279/* 1280 * Check to see if a block is currently memory resident. 1281 */ 1282struct buf * 1283incore(struct vnode * vp, daddr_t blkno) 1284{ 1285 struct buf *bp; 1286 1287 int s = splbio(); 1288 bp = gbincore(vp, blkno); 1289 splx(s); 1290 return (bp); 1291} 1292 1293/* 1294 * Returns true if no I/O is needed to access the 1295 * associated VM object. This is like incore except 1296 * it also hunts around in the VM system for the data. 1297 */ 1298 1299int 1300inmem(struct vnode * vp, daddr_t blkno) 1301{ 1302 vm_object_t obj; 1303 vm_offset_t toff, tinc; 1304 vm_page_t m; 1305 vm_ooffset_t off; 1306 1307 if (incore(vp, blkno)) 1308 return 1; 1309 if (vp->v_mount == NULL) 1310 return 0; 1311 if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0) 1312 return 0; 1313 1314 obj = vp->v_object; 1315 tinc = PAGE_SIZE; 1316 if (tinc > vp->v_mount->mnt_stat.f_iosize) 1317 tinc = vp->v_mount->mnt_stat.f_iosize; 1318 off = blkno * vp->v_mount->mnt_stat.f_iosize; 1319 1320 for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) { 1321 1322 m = vm_page_lookup(obj, OFF_TO_IDX(off + toff)); 1323 if (!m) 1324 return 0; 1325 if (vm_page_is_valid(m, 1326 (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0) 1327 return 0; 1328 } 1329 return 1; 1330} 1331
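/*
 * Example (from breadn() above): read-ahead uses inmem() to skip
 * blocks whose data is already resident in either a buffer or the
 * backing VM object, avoiding a needless getblk()/I-O setup:
 *
 *	if (inmem(vp, *rablkno))
 *		continue;
 */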
1332/* 1333 * now we set the dirty range for the buffer -- 1334 * for NFS -- if the file is mapped and pages have 1335 * been written to, let it know. We want the 1336 * entire range of the buffer to be marked dirty if 1337 * any of the pages have been written to for consistency 1338 * with the b_validoff, b_validend set in the nfs write 1339 * code, and used by the nfs read code. 1340 */ 1341static void 1342vfs_setdirty(struct buf *bp) { 1343 int i; 1344 vm_object_t object; 1345 vm_offset_t boffset, offset; 1346 /* 1347 * We qualify the scan for modified pages on whether the 1348 * object has been flushed yet. The OBJ_WRITEABLE flag 1349 * is not cleared simply by protecting pages off. 1350 */ 1351 if ((bp->b_flags & B_VMIO) && 1352 ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) { 1353 /* 1354 * test the pages to see if they have been modified directly 1355 * by users through the VM system. 1356 */ 1357 for (i = 0; i < bp->b_npages; i++) { 1358 bp->b_pages[i]->flags &= ~PG_ZERO; 1359 vm_page_test_dirty(bp->b_pages[i]); 1360 } 1361 1362 /* 1363 * scan forwards for the first page modified 1364 */ 1365 for (i = 0; i < bp->b_npages; i++) { 1366 if (bp->b_pages[i]->dirty) { 1367 break; 1368 } 1369 } 1370 boffset = (i << PAGE_SHIFT); 1371 if (boffset < bp->b_dirtyoff) { 1372 bp->b_dirtyoff = boffset; 1373 } 1374 1375 /* 1376 * scan backwards for the last page modified 1377 */ 1378 for (i = bp->b_npages - 1; i >= 0; --i) { 1379 if (bp->b_pages[i]->dirty) { 1380 break; 1381 } 1382 } 1383 boffset = (i + 1); 1384 offset = boffset + bp->b_pages[0]->pindex; 1385 if (offset >= object->size) 1386 boffset = object->size - bp->b_pages[0]->pindex; 1387 if (bp->b_dirtyend < (boffset << PAGE_SHIFT)) 1388 bp->b_dirtyend = (boffset << PAGE_SHIFT); 1389 } 1390} 1391 1392/* 1393 * Get a block given a specified block and offset into a file/device. 1394 */ 1395struct buf * 1396getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo) 1397{ 1398 struct buf *bp; 1399 int i, s; 1400 struct bufhashhdr *bh; 1401 int maxsize; 1402 int generation; 1403 int checksize; 1404 1405 if (vp->v_mount) { 1406 maxsize = vp->v_mount->mnt_stat.f_iosize; 1407 /* 1408 * This happens on mount points. 1409 */ 1410 if (maxsize < size) 1411 maxsize = size; 1412 } else { 1413 maxsize = size; 1414 } 1415 1416#if !defined(MAX_PERF) 1417 if (size > MAXBSIZE) 1418 panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE); 1419#endif 1420 1421 s = splbio(); 1422loop: 1423 if (numfreebuffers < lofreebuffers) { 1424 waitfreebuffers(slpflag, slptimeo); 1425 } 1426 1427 if ((bp = gbincore(vp, blkno))) { 1428loop1: 1429 if (bp->b_flags & B_BUSY) { 1430 1431 bp->b_flags |= B_WANTED; 1432 if (bp->b_usecount < BUF_MAXUSE) 1433 ++bp->b_usecount; 1434 1435 if (!tsleep(bp, 1436 (PRIBIO + 4) | slpflag, "getblk", slptimeo)) { 1437 goto loop; 1438 } 1439 1440 splx(s); 1441 return (struct buf *) NULL; 1442 } 1443 bp->b_flags |= B_BUSY | B_CACHE; 1444 bremfree(bp); 1445 1446 /* 1447 * check for size inconsistencies (note that they shouldn't 1448 * happen but do when filesystems don't handle the size changes 1449 * correctly.) We are conservative on metadata and don't just 1450 * extend the buffer but write (if needed) and re-constitute it. 1451 */ 1452 1453 if (bp->b_bcount != size) { 1454 if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) { 1455 allocbuf(bp, size); 1456 } else { 1457 if (bp->b_flags & B_DELWRI) { 1458 bp->b_flags |= B_NOCACHE; 1459 VOP_BWRITE(bp); 1460 } else { 1461 if (bp->b_flags & B_VMIO) { 1462 bp->b_flags |= B_RELBUF; 1463 brelse(bp); 1464 } else { 1465 bp->b_flags |= B_NOCACHE; 1466 VOP_BWRITE(bp); 1467 } 1468 } 1469 goto loop; 1470 } 1471 } 1472 1473#ifdef DIAGNOSTIC 1474 if (bp->b_offset == NOOFFSET) 1475 panic("getblk: no buffer offset"); 1476#endif 1477 1478 /* 1479 * Check that the constituted buffer really deserves the 1480 * B_CACHE bit to be set. B_VMIO type buffers might not 1481 * contain fully valid pages. Normal (old-style) buffers 1482 * should be fully valid. 1483 */ 1484 if (bp->b_flags & B_VMIO) { 1485 checksize = bp->b_bufsize; 1486 for (i = 0; i < bp->b_npages; i++) { 1487 int resid; 1488 int poffset; 1489 poffset = bp->b_offset & PAGE_MASK; 1490 resid = (checksize > (PAGE_SIZE - poffset)) ? 1491 (PAGE_SIZE - poffset) : checksize; 1492 if (!vm_page_is_valid(bp->b_pages[i], poffset, resid)) { 1493 bp->b_flags &= ~(B_CACHE | B_DONE); 1494 break; 1495 } 1496 checksize -= resid; 1497 } 1498 } 1499 1500 if (bp->b_usecount < BUF_MAXUSE) 1501 ++bp->b_usecount; 1502 splx(s); 1503 return (bp); 1504 } else { 1505 vm_object_t obj; 1506 1507 if ((bp = getnewbuf(vp, blkno, 1508 slpflag, slptimeo, size, maxsize)) == 0) { 1509 if (slpflag || slptimeo) { 1510 splx(s); 1511 return NULL; 1512 } 1513 goto loop; 1514 } 1515 1516 /* 1517 * This code is used to make sure that a buffer is not 1518 * created while the getnewbuf routine is blocked. 1519 * Normally the vnode is locked so this isn't a problem. 1520 * VBLK type I/O requests, however, don't lock the vnode. 1521 */ 1522 if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) { 1523 bp->b_flags |= B_INVAL; 1524 brelse(bp); 1525 goto loop; 1526 } 1527 1528 /* 1529 * Insert the buffer into the hash, so that it can 1530 * be found by incore. 1531 */ 1532 bp->b_blkno = bp->b_lblkno = blkno; 1533 1534 if (vp->v_type != VBLK) 1535 bp->b_offset = (off_t) blkno * maxsize; 1536 else 1537 bp->b_offset = (off_t) blkno * DEV_BSIZE; 1538 1539 bgetvp(vp, bp); 1540 LIST_REMOVE(bp, b_hash); 1541 bh = BUFHASH(vp, blkno); 1542 LIST_INSERT_HEAD(bh, bp, b_hash); 1543 1544 if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) { 1545 bp->b_flags |= (B_VMIO | B_CACHE); 1546#if defined(VFS_BIO_DEBUG) 1547 if (vp->v_type != VREG && vp->v_type != VBLK) 1548 printf("getblk: vmioing file type %d???\n", vp->v_type); 1549#endif 1550 } else { 1551 bp->b_flags &= ~B_VMIO; 1552 } 1553 1554 allocbuf(bp, size); 1555 1556 splx(s); 1557 return (bp); 1558 } 1559} 1560 1561/* 1562 * Get an empty, disassociated buffer of given size. 1563 */ 1564struct buf * 1565geteblk(int size) 1566{ 1567 struct buf *bp; 1568 int s; 1569 1570 s = splbio(); 1571 while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0); 1572 splx(s); 1573 allocbuf(bp, size); 1574 bp->b_flags |= B_INVAL; 1575 return (bp); 1576} 1577
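/*
 * Editor's note -- illustrative sketch, not part of this revision:
 * the usual pattern for creating or overwriting a whole block builds
 * on getblk()/bdwrite().  "vp", "lbn" and "fsize" are placeholders
 * for a vnode, logical block number and filesystem block size.
 */
#if 0
	struct buf *bp;

	bp = getblk(vp, lbn, fsize, 0, 0);
	if ((bp->b_flags & B_CACHE) == 0)
		vfs_bio_clrbuf(bp);	/* contents not valid; zero them */
	/* ... fill in bp->b_data ... */
	bdwrite(bp);			/* mark dirty and release; written later */
#endif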
1578 1579/* 1580 * This code constitutes the buffer memory from either anonymous system 1581 * memory (in the case of non-VMIO operations) or from an associated 1582 * VM object (in the case of VMIO operations). 1583 * 1584 * Note that this code is tricky, and has many complications to resolve 1585 * deadlock or inconsistent data situations. Tread lightly!!! 1586 * 1587 * Modify the length of a buffer's underlying buffer storage without 1588 * destroying information (unless, of course, the buffer is shrinking). 1589 */
1590int 1591allocbuf(struct buf * bp, int size) 1592{ 1593 1594 int s; 1595 int newbsize, mbsize; 1596 int i; 1597 1598#if !defined(MAX_PERF) 1599 if (!(bp->b_flags & B_BUSY)) 1600 panic("allocbuf: buffer not busy"); 1601 1602 if (bp->b_kvasize < size) 1603 panic("allocbuf: buffer too small"); 1604#endif 1605 1606 if ((bp->b_flags & B_VMIO) == 0) { 1607 caddr_t origbuf; 1608 int origbufsize; 1609 /* 1610 * Just get anonymous memory from the kernel 1611 */ 1612 mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1613#if !defined(NO_B_MALLOC) 1614 if (bp->b_flags & B_MALLOC) 1615 newbsize = mbsize; 1616 else 1617#endif 1618 newbsize = round_page(size); 1619 1620 if (newbsize < bp->b_bufsize) { 1621#if !defined(NO_B_MALLOC) 1622 /* 1623 * malloced buffers are not shrunk 1624 */ 1625 if (bp->b_flags & B_MALLOC) { 1626 if (newbsize) { 1627 bp->b_bcount = size; 1628 } else { 1629 free(bp->b_data, M_BIOBUF); 1630 bufspace -= bp->b_bufsize; 1631 bufmallocspace -= bp->b_bufsize; 1632 bp->b_data = bp->b_kvabase; 1633 bp->b_bufsize = 0; 1634 bp->b_bcount = 0; 1635 bp->b_flags &= ~B_MALLOC; 1636 } 1637 return 1; 1638 } 1639#endif 1640 vm_hold_free_pages( 1641 bp, 1642 (vm_offset_t) bp->b_data + newbsize, 1643 (vm_offset_t) bp->b_data + bp->b_bufsize); 1644 } else if (newbsize > bp->b_bufsize) { 1645#if !defined(NO_B_MALLOC) 1646 /* 1647 * We only use malloced memory on the first allocation, 1648 * and revert to page-allocated memory when the buffer grows. 1649 */ 1650 if ( (bufmallocspace < maxbufmallocspace) && 1651 (bp->b_bufsize == 0) && 1652 (mbsize <= PAGE_SIZE/2)) { 1653 1654 bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK); 1655 bp->b_bufsize = mbsize; 1656 bp->b_bcount = size; 1657 bp->b_flags |= B_MALLOC; 1658 bufspace += mbsize; 1659 bufmallocspace += mbsize; 1660 return 1; 1661 } 1662#endif 1663 origbuf = NULL; 1664 origbufsize = 0; 1665#if !defined(NO_B_MALLOC) 1666 /* 1667 * If the buffer is growing on its other-than-first allocation, 1668 * then we revert to the page-allocation scheme. 
1669 */ 1670 if (bp->b_flags & B_MALLOC) { 1671 origbuf = bp->b_data; 1672 origbufsize = bp->b_bufsize; 1673 bp->b_data = bp->b_kvabase; 1674 bufspace -= bp->b_bufsize; 1675 bufmallocspace -= bp->b_bufsize; 1676 bp->b_bufsize = 0; 1677 bp->b_flags &= ~B_MALLOC; 1678 newbsize = round_page(newbsize); 1679 } 1680#endif 1681 vm_hold_load_pages( 1682 bp, 1683 (vm_offset_t) bp->b_data + bp->b_bufsize, 1684 (vm_offset_t) bp->b_data + newbsize); 1685#if !defined(NO_B_MALLOC) 1686 if (origbuf) { 1687 bcopy(origbuf, bp->b_data, origbufsize); 1688 free(origbuf, M_BIOBUF); 1689 } 1690#endif 1691 } 1692 } else { 1693 vm_page_t m; 1694 int desiredpages; 1695 1696 newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 1697 desiredpages = (round_page(newbsize) >> PAGE_SHIFT); 1698 1699#if !defined(NO_B_MALLOC) 1700 if (bp->b_flags & B_MALLOC) 1701 panic("allocbuf: VMIO buffer can't be malloced"); 1702#endif 1703 1704 if (newbsize < bp->b_bufsize) { 1705 if (desiredpages < bp->b_npages) { 1706 for (i = desiredpages; i < bp->b_npages; i++) { 1707 /* 1708 * the page is not freed here -- it 1709 * is the responsibility of vnode_pager_setsize 1710 */ 1711 m = bp->b_pages[i]; 1712#if defined(DIAGNOSTIC) 1713 if (m == bogus_page) 1714 panic("allocbuf: bogus page found"); 1715#endif 1716 vm_page_sleep(m, "biodep", &m->busy); 1717 1718 bp->b_pages[i] = NULL; 1719 vm_page_unwire(m); 1720 } 1721 pmap_qremove((vm_offset_t) trunc_page(bp->b_data) + 1722 (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages)); 1723 bp->b_npages = desiredpages; 1724 } 1725 } else if (newbsize > bp->b_bufsize) { 1726 vm_object_t obj; 1727 vm_offset_t tinc, toff; 1728 vm_ooffset_t off; 1729 vm_pindex_t objoff; 1730 int pageindex, curbpnpages; 1731 struct vnode *vp; 1732 int bsize; 1733 int orig_validoff = bp->b_validoff; 1734 int orig_validend = bp->b_validend; 1735 1736 vp = bp->b_vp; 1737 1738 if (vp->v_type == VBLK) 1739 bsize = DEV_BSIZE; 1740 else 1741 bsize = vp->v_mount->mnt_stat.f_iosize; 1742 1743 if (bp->b_npages < desiredpages) { 1744 obj = vp->v_object; 1745 tinc = PAGE_SIZE; 1746 if (tinc > bsize) 1747 tinc = bsize; 1748 1749 off = bp->b_offset; 1750#ifdef DIAGNOSTIC 1751 if (bp->b_offset == NOOFFSET) 1752 panic("allocbuf: no buffer offset"); 1753#endif 1754 1755 curbpnpages = bp->b_npages; 1756 doretry: 1757 bp->b_validoff = orig_validoff; 1758 bp->b_validend = orig_validend; 1759 bp->b_flags |= B_CACHE; 1760 for (toff = 0; toff < newbsize; toff += tinc) { 1761 int bytesinpage; 1762 1763 pageindex = toff >> PAGE_SHIFT; 1764 objoff = OFF_TO_IDX(off + toff); 1765 if (pageindex < curbpnpages) { 1766 1767 m = bp->b_pages[pageindex]; 1768#ifdef VFS_BIO_DIAG 1769 if (m->pindex != objoff) 1770 panic("allocbuf: page changed offset??!!!?"); 1771#endif 1772 bytesinpage = tinc; 1773 if (tinc > (newbsize - toff)) 1774 bytesinpage = newbsize - toff; 1775 if (bp->b_flags & B_CACHE) 1776 vfs_buf_set_valid(bp, off, toff, bytesinpage, m); 1777 continue; 1778 } 1779 m = vm_page_lookup(obj, objoff); 1780 if (!m) { 1781 m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL); 1782 if (!m) { 1783 VM_WAIT; 1784 vm_pageout_deficit += (desiredpages - bp->b_npages); 1785 goto doretry; 1786 } 1787 1788 vm_page_wire(m); 1789 m->flags &= ~PG_BUSY; 1790 bp->b_flags &= ~B_CACHE; 1791 1792 } else if (m->flags & PG_BUSY) { 1793 s = splvm(); 1794 if (m->flags & PG_BUSY) { 1795 m->flags |= PG_WANTED; 1796 tsleep(m, PVM, "pgtblk", 0); 1797 } 1798 splx(s); 1799 goto doretry; 1800 } else { 1801 if ((curproc != pageproc) && 1802 ((m->queue - m->pc) == PQ_CACHE) && 
1803 ((cnt.v_free_count + cnt.v_cache_count) < 1804 (cnt.v_free_min + cnt.v_cache_min))) { 1805 pagedaemon_wakeup(); 1806 } 1807 bytesinpage = tinc; 1808 if (tinc > (newbsize - toff)) 1809 bytesinpage = newbsize - toff; 1810 if (bp->b_flags & B_CACHE) 1811 vfs_buf_set_valid(bp, off, toff, bytesinpage, m); 1812 m->flags &= ~PG_ZERO; 1813 vm_page_wire(m); 1814 } 1815 bp->b_pages[pageindex] = m; 1816 curbpnpages = pageindex + 1; 1817 } 1818 if (vp->v_tag == VT_NFS && 1819 vp->v_type != VBLK) { 1820 if (bp->b_dirtyend > 0) { 1821 bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff); 1822 bp->b_validend = max(bp->b_validend, bp->b_dirtyend); 1823 } 1824 if (bp->b_validend == 0) 1825 bp->b_flags &= ~B_CACHE; 1826 } 1827 bp->b_data = (caddr_t) trunc_page(bp->b_data); 1828 bp->b_npages = curbpnpages; 1829 pmap_qenter((vm_offset_t) bp->b_data, 1830 bp->b_pages, bp->b_npages); 1831 ((vm_offset_t) bp->b_data) |= off & PAGE_MASK; 1832 } 1833 } 1834 } 1835 if (bp->b_flags & B_VMIO) 1836 vmiospace += (newbsize - bp->b_bufsize); 1837 bufspace += (newbsize - bp->b_bufsize); 1838 bp->b_bufsize = newbsize; 1839 bp->b_bcount = size; 1840 return 1; 1841} 1842 1843/* 1844 * Wait for buffer I/O completion, returning error status. 1845 */ 1846int 1847biowait(register struct buf * bp) 1848{ 1849 int s; 1850 1851 s = splbio(); 1852 while ((bp->b_flags & B_DONE) == 0) 1853#if defined(NO_SCHEDULE_MODS) 1854 tsleep(bp, PRIBIO, "biowait", 0); 1855#else 1856 if (bp->b_flags & B_READ) 1857 tsleep(bp, PRIBIO, "biord", 0); 1858 else 1859 tsleep(bp, PRIBIO, "biowr", 0); 1860#endif 1861 splx(s); 1862 if (bp->b_flags & B_EINTR) { 1863 bp->b_flags &= ~B_EINTR; 1864 return (EINTR); 1865 } 1866 if (bp->b_flags & B_ERROR) { 1867 return (bp->b_error ? bp->b_error : EIO); 1868 } else { 1869 return (0); 1870 } 1871} 1872 1873/* 1874 * Finish I/O on a buffer, calling an optional function. 1875 * This is usually called from interrupt level, so process blocking 1876 * is not *a good idea*. 
1877 */ 1878void 1879biodone(register struct buf * bp) 1880{ 1881 int s; 1882 1883 s = splbio(); 1884 1885#if !defined(MAX_PERF) 1886 if (!(bp->b_flags & B_BUSY)) 1887 panic("biodone: buffer not busy"); 1888#endif 1889 1890 if (bp->b_flags & B_DONE) { 1891 splx(s); 1892#if !defined(MAX_PERF) 1893 printf("biodone: buffer already done\n"); 1894#endif 1895 return; 1896 } 1897 bp->b_flags |= B_DONE; 1898 1899 if ((bp->b_flags & B_READ) == 0) { 1900 vwakeup(bp); 1901 } 1902 1903#ifdef BOUNCE_BUFFERS 1904 if (bp->b_flags & B_BOUNCE) { 1905 vm_bounce_free(bp); 1906 } 1907#endif 1908 1909 /* call optional completion function if requested */ 1910 if (bp->b_flags & B_CALL) { 1911 bp->b_flags &= ~B_CALL; 1912 (*bp->b_iodone) (bp); 1913 splx(s); 1914 return; 1915 } 1916 if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete) 1917 (*bioops.io_complete)(bp); 1918 1919 if (bp->b_flags & B_VMIO) { 1920 int i, resid; 1921 vm_ooffset_t foff; 1922 vm_page_t m; 1923 vm_object_t obj; 1924 int iosize; 1925 struct vnode *vp = bp->b_vp; 1926 1927 obj = vp->v_object; 1928 1929#if defined(VFS_BIO_DEBUG) 1930 if (vp->v_usecount == 0) { 1931 panic("biodone: zero vnode ref count"); 1932 } 1933 1934 if (vp->v_object == NULL) { 1935 panic("biodone: missing VM object"); 1936 } 1937 1938 if ((vp->v_flag & VOBJBUF) == 0) { 1939 panic("biodone: vnode is not setup for merged cache"); 1940 } 1941#endif 1942 1943 foff = bp->b_offset; 1944#ifdef DIAGNOSTIC 1945 if (bp->b_offset == NOOFFSET) 1946 panic("biodone: no buffer offset"); 1947#endif 1948 1949#if !defined(MAX_PERF) 1950 if (!obj) { 1951 panic("biodone: no object"); 1952 } 1953#endif 1954#if defined(VFS_BIO_DEBUG) 1955 if (obj->paging_in_progress < bp->b_npages) { 1956 printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n", 1957 obj->paging_in_progress, bp->b_npages); 1958 } 1959#endif 1960 iosize = bp->b_bufsize; 1961 for (i = 0; i < bp->b_npages; i++) { 1962 int bogusflag = 0; 1963 m = bp->b_pages[i]; 1964 if (m == bogus_page) { 1965 bogusflag = 1; 1966 m = vm_page_lookup(obj, OFF_TO_IDX(foff)); 1967 if (!m) { 1968#if defined(VFS_BIO_DEBUG) 1969 printf("biodone: page disappeared\n"); 1970#endif 1971 --obj->paging_in_progress; 1972 continue; 1973 } 1974 bp->b_pages[i] = m; 1975 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 1976 } 1977#if defined(VFS_BIO_DEBUG) 1978 if (OFF_TO_IDX(foff) != m->pindex) { 1979 printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex); 1980 } 1981#endif 1982 resid = IDX_TO_OFF(m->pindex + 1) - foff; 1983 if (resid > iosize) 1984 resid = iosize; 1985 1986 /* 1987 * In the write case, the valid and clean bits are 1988 * already changed correctly, so we only need to do this 1989 * here in the read case. 1990 */ 1991 if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) { 1992 vfs_page_set_valid(bp, foff, i, m); 1993 } 1994 m->flags &= ~PG_ZERO; 1995 1996 /* 1997 * when debugging new filesystems or buffer I/O methods, this 1998 * is the most common error that pops up. if you see this, you 1999 * have not set the page busy flag correctly!!! 
2000 */ 2001 if (m->busy == 0) { 2002#if !defined(MAX_PERF) 2003 printf("biodone: page busy < 0, " 2004 "pindex: %d, foff: 0x(%x,%x), " 2005 "resid: %d, index: %d\n", 2006 (int) m->pindex, (int)(foff >> 32), 2007 (int) foff & 0xffffffff, resid, i); 2008#endif 2009 if (vp->v_type != VBLK) 2010#if !defined(MAX_PERF) 2011 printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n", 2012 bp->b_vp->v_mount->mnt_stat.f_iosize, 2013 (int) bp->b_lblkno, 2014 bp->b_flags, bp->b_npages); 2015 else 2016 printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n", 2017 (int) bp->b_lblkno, 2018 bp->b_flags, bp->b_npages); 2019 printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n", 2020 m->valid, m->dirty, m->wire_count); 2021#endif 2022 panic("biodone: page busy < 0\n"); 2023 } 2024 PAGE_BWAKEUP(m); 2025 --obj->paging_in_progress; 2026 foff += resid; 2027 iosize -= resid; 2028 } 2029 if (obj && 2030 (obj->paging_in_progress == 0) && 2031 (obj->flags & OBJ_PIPWNT)) { 2032 obj->flags &= ~OBJ_PIPWNT; 2033 wakeup(obj); 2034 } 2035 } 2036 /* 2037 * For asynchronous completions, release the buffer now. The brelse 2038 * checks for B_WANTED and will do the wakeup there if necessary - so 2039 * no need to do a wakeup here in the async case. 2040 */ 2041 2042 if (bp->b_flags & B_ASYNC) { 2043 if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0) 2044 brelse(bp); 2045 else 2046 bqrelse(bp); 2047 } else { 2048 bp->b_flags &= ~B_WANTED; 2049 wakeup(bp); 2050 } 2051 splx(s); 2052} 2053 2054static int 2055count_lock_queue() 2056{ 2057 int count; 2058 struct buf *bp; 2059 2060 count = 0; 2061 for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]); 2062 bp != NULL; 2063 bp = TAILQ_NEXT(bp, b_freelist)) 2064 count++; 2065 return (count); 2066} 2067 2068#if 0 /* not with kirks code */ 2069static int vfs_update_interval = 30; 2070 2071static void 2072vfs_update() 2073{ 2074 while (1) { 2075 tsleep(&vfs_update_wakeup, PUSER, "update", 2076 hz * vfs_update_interval); 2077 vfs_update_wakeup = 0; 2078 sync(curproc, NULL); 2079 } 2080} 2081 2082static int 2083sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS 2084{ 2085 int error = sysctl_handle_int(oidp, 2086 oidp->oid_arg1, oidp->oid_arg2, req); 2087 if (!error) 2088 wakeup(&vfs_update_wakeup); 2089 return error; 2090} 2091 2092SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW, 2093 &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", ""); 2094 2095#endif 2096 2097 2098/* 2099 * This routine is called in lieu of iodone in the case of 2100 * incomplete I/O. This keeps the busy status for pages 2101 * consistent. 2102 */ 2103void 2104vfs_unbusy_pages(struct buf * bp) 2105{ 2106 int i; 2107 2108 if (bp->b_flags & B_VMIO) { 2109 struct vnode *vp = bp->b_vp; 2110 vm_object_t obj = vp->v_object; 2111 2112 for (i = 0; i < bp->b_npages; i++) { 2113 vm_page_t m = bp->b_pages[i]; 2114 2115 if (m == bogus_page) { 2116 m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i); 2117#if !defined(MAX_PERF) 2118 if (!m) { 2119 panic("vfs_unbusy_pages: page missing\n"); 2120 } 2121#endif 2122 bp->b_pages[i] = m; 2123 pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages); 2124 } 2125 --obj->paging_in_progress; 2126 m->flags &= ~PG_ZERO; 2127 PAGE_BWAKEUP(m); 2128 } 2129 if (obj->paging_in_progress == 0 && 2130 (obj->flags & OBJ_PIPWNT)) { 2131 obj->flags &= ~OBJ_PIPWNT; 2132 wakeup(obj); 2133 } 2134 } 2135} 2136
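/*
 * Worked example (editor's illustration): each bit of m->valid covers
 * DEV_BSIZE bytes of the page.  With DEV_BSIZE = 512, m->valid = 0x0f
 * and off = 0, the scan in vfs_buf_set_valid() below yields svalid = 0
 * and evalid = 4 * 512 = 2048: the first 2048 bytes of the page form
 * the valid range recorded in b_validoff/b_validend.
 */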
2137/* 2138 * Set NFS' b_validoff and b_validend fields from the valid bits 2139 * of a page. If the consumer is not NFS, and the page is not 2140 * valid for the entire range, clear the B_CACHE flag to force 2141 * the consumer to re-read the page. 2142 */ 2143static void 2144vfs_buf_set_valid(struct buf *bp, 2145 vm_ooffset_t foff, vm_offset_t off, vm_offset_t size, 2146 vm_page_t m) 2147{ 2148 if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) { 2149 vm_offset_t svalid, evalid; 2150 int validbits = m->valid; 2151 2152 /* 2153 * This only bothers with the first valid range in the 2154 * page. 2155 */ 2156 svalid = off; 2157 while (validbits && !(validbits & 1)) { 2158 svalid += DEV_BSIZE; 2159 validbits >>= 1; 2160 } 2161 evalid = svalid; 2162 while (validbits & 1) { 2163 evalid += DEV_BSIZE; 2164 validbits >>= 1; 2165 } 2166 /* 2167 * Make sure this range is contiguous with the range 2168 * built up from previous pages. If not, then we will 2169 * just use the range from the previous pages. 2170 */ 2171 if (svalid == bp->b_validend) { 2172 bp->b_validoff = min(bp->b_validoff, svalid); 2173 bp->b_validend = max(bp->b_validend, evalid); 2174 } 2175 } else if (!vm_page_is_valid(m, 2176 (vm_offset_t) ((foff + off) & PAGE_MASK), 2177 size)) { 2178 bp->b_flags &= ~B_CACHE; 2179 } 2180} 2181 2182/* 2183 * Set the valid bits in a page, taking care of the b_validoff, 2184 * b_validend fields which NFS uses to optimise small reads. Off is 2185 * the offset within the file and pageno is the page index within the buf. 2186 */ 2187static void 2188vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m) 2189{ 2190 struct vnode *vp = bp->b_vp; 2191 vm_ooffset_t soff, eoff; 2192 2193 soff = off; 2194 eoff = off + min(PAGE_SIZE, bp->b_bufsize); 2195 if (vp->v_tag == VT_NFS && vp->v_type != VBLK) { 2196 vm_ooffset_t sv, ev; 2197 vm_page_set_invalid(m, 2198 (vm_offset_t) (soff & PAGE_MASK), 2199 (vm_offset_t) (eoff - soff)); 2200 off = off - pageno * PAGE_SIZE; 2201 sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1)); 2202 ev = off + ((bp->b_validend + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1)); 2203 soff = qmax(sv, soff); 2204 eoff = qmin(ev, eoff); 2205 } 2206 if (eoff > soff) 2207 vm_page_set_validclean(m, 2208 (vm_offset_t) (soff & PAGE_MASK), 2209 (vm_offset_t) (eoff - soff)); 2210} 2211
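/*
 * Example (from bwrite() above): every device transfer is bracketed by
 * the helpers below -- the pages are busied before the strategy call:
 *
 *	vfs_busy_pages(bp, 1);
 *	VOP_STRATEGY(bp->b_vp, bp);
 *
 * biodone() performs the matching page wakeups on completion, while a
 * transfer that never completes is unwound with vfs_unbusy_pages(),
 * which is called in lieu of iodone.
 */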
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_busy_pages: no buffer offset");
#endif

		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (vm_page_sleep(m, "vbpage", NULL))
				goto retry;
		}

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			m->flags &= ~PG_ZERO;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;

		foff = bp->b_offset;
#ifdef DIAGNOSTIC
		if (bp->b_offset == NOOFFSET)
			panic("vfs_clean_pages: no buffer offset");
#endif

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
			    (bp->b_pages[0]->valid != mask)) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
					    (bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data +
						    (i << PAGE_SHIFT) +
						    j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			bp->b_pages[i]->valid = VM_PAGE_BITS_ALL;
			bp->b_pages[i]->flags &= ~PG_ZERO;
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages maps anonymous pages into a buffer's address
 * space and vm_hold_free_pages releases them again.  The pages are
 * not associated with a file object.
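 *
 * An illustrative (hypothetical) call, extending a buffer's backing
 * store from one page to two:
 *
 *	vm_hold_load_pages(bp, (vm_offset_t)bp->b_data + PAGE_SIZE,
 *	    (vm_offset_t)bp->b_data + 2 * PAGE_SIZE);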
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
		    ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		p->valid = VM_PAGE_BITS_ALL;
		p->flags &= ~PG_ZERO;
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			p->flags |= PG_BUSY;
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
	    bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
	    "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
	    "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
	    "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
	    "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
	    "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
	    "b_blkno = %d, b_pblkno = %d\n",
	    bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
	    bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;

		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;

			m = bp->b_pages[i];
			db_printf("(%p, 0x%lx, 0x%lx)", (void *)m->object,
			    (u_long)m->pindex, (u_long)VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */
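
#if 0
/*
 * Illustrative sketch only (kept out of the build): the per-DEV_BSIZE
 * valid-bit arithmetic used by vfs_bio_clrbuf() above.  Each DEV_BSIZE
 * chunk of a buffer maps to one bit of a page's valid mask, so a
 * hypothetical 2048-byte buffer occupying a single page yields 0x0f.
 * The function name is hypothetical and not part of this file's API.
 */
static int
vfs_bio_validmask(int bufsize)
{
	int i, mask;

	mask = 0;
	for (i = 0; i < bufsize; i += DEV_BSIZE)
		mask |= (1 << (i / DEV_BSIZE));
	return (mask);		/* bufsize 2048, DEV_BSIZE 512 -> 0x0f */
}
#endif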