vfs_bio.c revision 33181
/*
 * Copyright (c) 1994,1997 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 *
 * $Id: vfs_bio.c,v 1.149 1998/02/06 12:13:29 eivind Exp $
 */

/*
 * This file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

static MALLOC_DEFINE(M_BIOBUF, "BIO buffer", "BIO buffer");

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

static int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_buf_set_valid(struct buf *bp, vm_ooffset_t foff,
			      vm_offset_t off, vm_offset_t size,
			      vm_page_t m);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
			       int pageno, vm_page_t m);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void flushdirtybuffers(int slpflag, int slptimeo);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;
int numdirtybuffers;
static int lodirtybuffers, hidirtybuffers;
static int numfreebuffers, lofreebuffers, hifreebuffers;
static int kvafreespace;

SYSCTL_INT(_vfs, OID_AUTO, numdirtybuffers, CTLFLAG_RD,
	&numdirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lodirtybuffers, CTLFLAG_RW,
	&lodirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hidirtybuffers, CTLFLAG_RW,
	&hidirtybuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, numfreebuffers, CTLFLAG_RD,
	&numfreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, lofreebuffers, CTLFLAG_RW,
	&lofreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, hifreebuffers, CTLFLAG_RW,
	&hifreebuffers, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxbufspace, CTLFLAG_RW,
	&maxbufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufspace, CTLFLAG_RD,
	&bufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxvmiobufspace, CTLFLAG_RW,
	&maxvmiobufspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, vmiospace, CTLFLAG_RD,
	&vmiospace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, maxmallocbufspace, CTLFLAG_RW,
	&maxbufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, bufmallocspace, CTLFLAG_RD,
	&bufmallocspace, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, kvafreespace, CTLFLAG_RD,
	&kvafreespace, 0, "");

static LIST_HEAD(bufhashhdr, buf) bufhashtbl[BUFHSZ], invalhash;
static TAILQ_HEAD(bqueues, buf) bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 24

#define VFS_BIO_NEED_ANY 1
#define VFS_BIO_NEED_LOWLIMIT 2
#define VFS_BIO_NEED_FREE 4
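
/*
 * Editor's note: an illustrative (hypothetical) userland sketch, not part
 * of this file.  The counters and watermarks above are exported under the
 * "vfs" sysctl tree, so they can be inspected from userland with
 * sysctlbyname(3).
 */
#ifdef VFS_BIO_EXAMPLES_USERLAND
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int ndirty;
	size_t len = sizeof(ndirty);

	/* read the read-only counter published by SYSCTL_INT above */
	if (sysctlbyname("vfs.numdirtybuffers", &ndirty, &len, NULL, 0) == 0)
		printf("dirty buffers: %d\n", ndirty);
	return (0);
}
#endif /* VFS_BIO_EXAMPLES_USERLAND */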

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_generation = 0;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = (nbuf + 8) * DFLTBSIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

/*
 * Remove the probability of deadlock conditions by limiting the
 * number of dirty buffers.
 */
	hidirtybuffers = nbuf / 8 + 20;
	lodirtybuffers = nbuf / 16 + 10;
	numdirtybuffers = 0;
	lofreebuffers = nbuf / 18 + 5;
	hifreebuffers = 2 * lofreebuffers;
	numfreebuffers = nbuf;
	kvafreespace = 0;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * Free the kva allocation for a buffer
 * Must be called only at splbio or higher,
 *  as this is the only locking for buffer_map.
 */
static void
bfreekva(struct buf * bp)
{
	if (bp->b_kvasize == 0)
		return;

	vm_map_delete(buffer_map,
		(vm_offset_t) bp->b_kvabase,
		(vm_offset_t) bp->b_kvabase + bp->b_kvasize);

	bp->b_kvasize = 0;

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		if (bp->b_qindex == QUEUE_EMPTY) {
			kvafreespace -= bp->b_kvasize;
		}
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
#if !defined(MAX_PERF)
		panic("bremfree: removing a buffer when not on a queue");
#endif
	}
	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_DELWRI|B_LOCKED)) == 0)
		--numfreebuffers;
	splx(s);
}


/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
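
/*
 * Editor's note: the sketch below is illustrative and not part of the
 * original file; "my_read_block" is a hypothetical caller.  It shows the
 * usual consumer pattern: bread() returns a busy buffer, brelse() discards
 * it on error, and bqrelse() releases it while keeping the contents cached.
 */
#ifdef VFS_BIO_EXAMPLES
static int
my_read_block(struct vnode *vp, daddr_t lbn, int bsize, struct ucred *cred,
    char *dst)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);		/* toss the failed buffer */
		return (error);
	}
	bcopy(bp->b_data, dst, bsize);
	bqrelse(bp);			/* release, keep contents cached */
	return (0);
}
#endif /* VFS_BIO_EXAMPLES */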

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");
#endif

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & B_DELWRI) == B_DELWRI) {
		--numdirtybuffers;
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}
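
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_modify_block" is hypothetical.  It shows the synchronous update
 * pattern: fetch the block, modify it, then bwrite() it, which starts the
 * I/O, waits via biowait(), and releases the buffer.
 */
#ifdef VFS_BIO_EXAMPLES
static int
my_modify_block(struct vnode *vp, daddr_t lbn, int bsize, struct ucred *cred)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, bsize, cred, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	/* ... modify bp->b_data here ... */
	return (bwrite(bp));	/* waits for completion, releases bp */
}
#endif /* VFS_BIO_EXAMPLES */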

inline void
vfs_bio_need_satisfy(void) {
	++numfreebuffers;
	if (!needsbuffer)
		return;
	if (numdirtybuffers < lodirtybuffers) {
		needsbuffer &= ~(VFS_BIO_NEED_ANY | VFS_BIO_NEED_LOWLIMIT);
	} else {
		needsbuffer &= ~VFS_BIO_NEED_ANY;
	}
	if (numfreebuffers >= hifreebuffers) {
		needsbuffer &= ~VFS_BIO_NEED_FREE;
	}
	wakeup(&needsbuffer);
}

/*
 * Delayed write. (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

#if !defined(MAX_PERF)
	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
#endif

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
		++numdirtybuffers;
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);

	if (numdirtybuffers >= hidirtybuffers)
		flushdirtybuffers(0, 0);

	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	/*
	 * XXX Add in B_ASYNC once the SCSI
	 *     layer can deal with ordered
	 *     writes properly.
	 */
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}
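
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_write_block" and its "how" argument are hypothetical.  It contrasts
 * the three write flavors defined above: bdwrite() just dirties the buffer
 * and returns, bawrite() starts the write without waiting, and bwrite()
 * waits for completion.
 */
#ifdef VFS_BIO_EXAMPLES
enum my_write_how { MY_DELAY, MY_ASYNC, MY_SYNC };

static int
my_write_block(struct buf *bp, enum my_write_how how)
{
	switch (how) {
	case MY_DELAY:
		bdwrite(bp);	/* mark dirty; flushed by the update daemon */
		return (0);
	case MY_ASYNC:
		bawrite(bp);	/* start I/O, do not wait */
		return (0);
	default:
		return (bwrite(bp));	/* start I/O and wait */
	}
}
#endif /* VFS_BIO_EXAMPLES */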

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
		(bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		if (bp->b_flags & B_DELWRI)
			--numdirtybuffers;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 *
	 * If the buffer is a partially filled NFS buffer, keep it
	 * since invalidating it now will lose information.  The valid
	 * flags in the vm_pages have only DEV_BSIZE resolution but
	 * the b_validoff, b_validend fields have byte resolution.
	 * This can avoid unnecessary re-reads of the buffer.
	 * XXX this seems to cause performance problems.
	 */
	if ((bp->b_flags & B_VMIO)
	    && !(bp->b_vp->v_tag == VT_NFS &&
		 bp->b_vp->v_type != VBLK &&
		 (bp->b_flags & B_DELWRI) != 0)
#ifdef notdef
	    && (bp->b_vp->v_tag != VT_NFS
		|| bp->b_vp->v_type == VBLK
		|| (bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR))
		|| bp->b_validend == 0
		|| (bp->b_validoff == 0
		    && bp->b_validend == bp->b_bufsize))
#endif
	    ) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;

#if !defined(MAX_PERF)
		if (!vp)
			panic("brelse: missing vp");
#endif

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
#if !defined(MAX_PERF)
					if (!m) {
						panic("brelse: page missing\n");
					}
#endif
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	} else if (bp->b_flags & B_VMIO) {
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");
#endif

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		kvafreespace += bp->b_kvasize;
		bp->b_generation++;

	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_flags |= B_INVAL;
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		bp->b_generation++;

	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);

	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);

	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & B_INVAL) ||
		(bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		if (bp->b_flags & B_DELWRI) {
			--numdirtybuffers;
			bp->b_flags &= ~B_DELWRI;
		}
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer, keeping its (valid) contents cached.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

#if !defined(MAX_PERF)
	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");
#endif

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
	}

	if ((bp->b_flags & (B_LOCKED|B_DELWRI)) == 0) {
		vfs_bio_need_satisfy();
	}

	/* unlock */
	bp->b_flags &= ~(B_ORDERED | B_WANTED | B_BUSY |
				B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		vm_page_unwire(m);
		/*
		 * We don't mess with busy pages, it is
		 * the responsibility of the process that
		 * busied the pages to deal with them.
		 */
		if ((m->flags & PG_BUSY) || (m->busy != 0))
			continue;

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			/*
			 * If this is an async free -- we cannot place
			 * pages onto the cache queue.  If it is an
			 * async free, then we don't modify any queues.
			 * This is probably in error (for perf reasons),
			 * and we will eventually need to build
			 * a more complete infrastructure to support I/O
			 * rundown.
			 */
			if ((bp->b_flags & B_ASYNC) == 0) {

				/*
				 * In the case of sync buffer frees, we can do pretty much
				 * anything to any of the memory queues.  Specifically,
				 * the cache queue is okay to be modified.
				 */
				if (m->valid) {
					if(m->dirty == 0)
						vm_page_test_dirty(m);
					/*
					 * this keeps pressure off of the process memory
					 */
					if (m->dirty == 0 && m->hold_count == 0)
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				} else if (m->hold_count == 0) {
					m->flags |= PG_BUSY;
					vm_page_protect(m, VM_PROT_NONE);
					vm_page_free(m);
				}
			} else {
				/*
				 * If async, then at least we clear the
				 * act_count.
				 */
				m->act_count = 0;
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}
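
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_uncache_block" is hypothetical.  Per the rundown logic above,
 * setting B_NOCACHE (or B_INVAL) before brelse() throws a block out of
 * both the buffer cache and, for VMIO buffers, the backing VM object.
 */
#ifdef VFS_BIO_EXAMPLES
static void
my_uncache_block(struct vnode *vp, daddr_t lbn, int bsize)
{
	struct buf *bp;

	bp = getblk(vp, lbn, bsize, 0, 0);
	bp->b_flags |= B_NOCACHE;
	brelse(bp);
}
#endif /* VFS_BIO_EXAMPLES */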

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;
	int size;
	int maxcl;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
#if 0
	else if ((vp->v_flag & VOBJBUF) && (vp->v_type == VBLK) &&
	    ((size = bp->b_bufsize) >= PAGE_SIZE)) {
		maxcl = MAXPHYS / size;
		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if (bpa->b_blkno !=
				    bp->b_blkno + ((i * size) >> DEV_BSHIFT))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
#endif

	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}
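
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_flush_vnode_block" is hypothetical.  It shows how a caller can push
 * a delayed-write block out through the clustering path above, much the
 * way flushdirtybuffers() (below) drains whole queues.
 */
#ifdef VFS_BIO_EXAMPLES
static int
my_flush_vnode_block(struct vnode *vp, daddr_t lbn)
{
	struct buf *bp;
	int s, nwritten = 0;

	s = splbio();
	bp = gbincore(vp, lbn);
	if (bp != NULL && (bp->b_flags & (B_BUSY | B_DELWRI)) == B_DELWRI)
		nwritten = vfs_bio_awrite(bp);	/* does its own splbio/splx */
	splx(s);
	return (nwritten);	/* bytes queued for write */
}
#endif /* VFS_BIO_EXAMPLES */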


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(struct vnode *vp, daddr_t blkno,
	int slpflag, int slptimeo, int size, int maxsize)
{
	struct buf *bp, *bp1;
	int nbyteswritten = 0;
	vm_offset_t addr;
	static int writerecursion = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
#endif
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
#endif
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
#if !defined(MAX_PERF)
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
#endif
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer |= VFS_BIO_NEED_ANY;
		do
			tsleep(&needsbuffer, (PRIBIO + 1) | slpflag, "newbuf",
			    slptimeo);
		while (needsbuffer & VFS_BIO_NEED_ANY);
		return (0);
	}

#if defined(DIAGNOSTIC)
	if (bp->b_flags & B_BUSY) {
		panic("getnewbuf: busy buffer on free list\n");
	}
#endif

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
			(vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}


	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {

		/*
		 * If our delayed write is likely to be used soon, then
		 * recycle back onto the LRU queue.
		 */
		if (vp && (bp->b_vp == vp) && (bp->b_qindex == QUEUE_LRU) &&
			(bp->b_lblkno >= blkno) && (maxsize > 0)) {

			if (bp->b_usecount > 0) {
				if (bp->b_lblkno < blkno + (MAXPHYS / maxsize)) {

					TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);

					if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
						TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
						bp->b_usecount--;
						goto start;
					}
					TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				}
			}
		}

		/*
		 * Certain layered filesystems can recursively re-enter the vfs_bio
		 * code, due to delayed writes.  This helps keep the system from
		 * deadlocking.
		 */
		if (writerecursion > 0) {
			bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
			while (bp) {
				if ((bp->b_flags & B_DELWRI) == 0)
					break;
				bp = TAILQ_NEXT(bp, b_freelist);
			}
			if (bp == NULL) {
				bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);
				while (bp) {
					if ((bp->b_flags & B_DELWRI) == 0)
						break;
					bp = TAILQ_NEXT(bp, b_freelist);
				}
			}
			if (bp == NULL)
				panic("getnewbuf: cannot get buffer, infinite recursion failure");
		} else {
			++writerecursion;
			nbyteswritten += vfs_bio_awrite(bp);
			--writerecursion;
			if (!slpflag && !slptimeo) {
				return (0);
			}
			goto start;
		}
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO) {
		bp->b_flags &= ~B_ASYNC;
		vfs_vmio_release(bp);
	}

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	bp->b_generation++;

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 5;

	maxsize = (maxsize + PAGE_MASK) & ~PAGE_MASK;

	/*
	 * we assume that buffer_map is not at address 0
	 */
	addr = 0;
	if (maxsize != bp->b_kvasize) {
		bfreekva(bp);

findkvaspace:
		/*
		 * See if we have buffer kva space
		 */
		if (vm_map_findspace(buffer_map,
			vm_map_min(buffer_map), maxsize, &addr)) {
			if (kvafreespace > 0) {
				int totfree = 0, freed;
				do {
					freed = 0;
					for (bp1 = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]);
					    bp1 != NULL; bp1 = TAILQ_NEXT(bp1, b_freelist)) {
						if (bp1->b_kvasize != 0) {
							totfree += bp1->b_kvasize;
							freed = bp1->b_kvasize;
							bremfree(bp1);
							bfreekva(bp1);
							brelse(bp1);
							break;
						}
					}
				} while (freed);
				/*
				 * if we found free space, then retry with the same buffer.
				 */
				if (totfree)
					goto findkvaspace;
			}
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto trytofreespace;
		}
	}

	/*
	 * See if we have exceeded our allocated space
	 */
	if (bufspace >= (maxbufspace + nbyteswritten)) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}

	/*
	 * create a map entry for the buffer -- in essence
	 * reserving the kva space.
	 */
	if (addr) {
		vm_map_insert(buffer_map, NULL, 0,
			addr, addr + maxsize,
			VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);

		bp->b_kvabase = (caddr_t) addr;
		bp->b_kvasize = maxsize;
	}
	bp->b_data = bp->b_kvabase;

	return (bp);
}

static void
waitfreebuffers(int slpflag, int slptimeo) {
	while (numfreebuffers < hifreebuffers) {
		flushdirtybuffers(slpflag, slptimeo);
		if (numfreebuffers >= hifreebuffers)
			break;
		needsbuffer |= VFS_BIO_NEED_FREE;
		if (tsleep(&needsbuffer, PRIBIO|slpflag, "biofre", slptimeo))
			break;
	}
}

static void
flushdirtybuffers(int slpflag, int slptimeo) {
	int s;
	static pid_t flushing = 0;

	s = splbio();

	if (flushing) {
		if (flushing == curproc->p_pid) {
			splx(s);
			return;
		}
		while (flushing) {
			if (tsleep(&flushing, PRIBIO|slpflag, "biofls", slptimeo)) {
				splx(s);
				return;
			}
		}
	}
	flushing = curproc->p_pid;

	while (numdirtybuffers > lodirtybuffers) {
		struct buf *bp;
		needsbuffer |= VFS_BIO_NEED_LOWLIMIT;
		bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]);
		if (bp == NULL)
			bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]);

		while (bp && ((bp->b_flags & B_DELWRI) == 0)) {
			bp = TAILQ_NEXT(bp, b_freelist);
		}

		if (bp) {
			vfs_bio_awrite(bp);
			continue;
		}
		break;
	}

	flushing = 0;
	wakeup(&flushing);
	splx(s);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VOBJBUF) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}
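
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_want_readahead" is hypothetical.  It applies the residency tests the
 * way breadn() uses them: skip issuing read-ahead when either a buffer
 * (incore()) or the underlying VM pages (inmem()) already hold the data.
 */
#ifdef VFS_BIO_EXAMPLES
static int
my_want_readahead(struct vnode *vp, daddr_t rablkno)
{
	/* inmem() calls incore() first, then checks the VM object */
	return (inmem(vp, rablkno) == 0);
}
#endif /* VFS_BIO_EXAMPLES */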

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
		((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;
	int maxsize;
	int generation;

	if (vp->v_mount) {
		maxsize = vp->v_mount->mnt_stat.f_iosize;
		/*
		 * This happens on mount points.
		 */
		if (maxsize < size)
			maxsize = size;
	} else {
		maxsize = size;
	}

#if !defined(MAX_PERF)
	if (size > MAXBSIZE)
		panic("getblk: size(%d) > MAXBSIZE(%d)\n", size, MAXBSIZE);
#endif

	s = splbio();
loop:
	if (numfreebuffers < lofreebuffers) {
		waitfreebuffers(slpflag, slptimeo);
	}

	if ((bp = gbincore(vp, blkno))) {
loop1:
		generation = bp->b_generation;
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
				(PRIBIO + 1) | slpflag, "getblk", slptimeo)) {
				if (bp->b_generation != generation)
					goto loop;
				goto loop1;
			} else {
				splx(s);
				return (struct buf *) NULL;
			}
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			bp->b_generation++;
			if ((bp->b_flags & B_VMIO) && (size <= bp->b_kvasize)) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;

		if ((bp = getnewbuf(vp, blkno,
			slpflag, slptimeo, size, maxsize)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if ((obj = vp->v_object) && (vp->v_flag & VOBJBUF)) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, (daddr_t) 0, 0, 0, size, MAXBSIZE)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if (bp->b_kvasize < size)
		panic("allocbuf: buffer too small");
#endif

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = bp->b_kvabase;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ( (bufmallocspace < maxbufmallocspace) &&
				(bp->b_bufsize == 0) &&
				(mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = bp->b_kvabase;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
#if defined(DIAGNOSTIC)
					if (m == bogus_page)
						panic("allocbuf: bogus page found");
#endif
					s = splvm();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
				curbpnpages = bp->b_npages;
	doretry:
				bp->b_flags |= B_CACHE;
				bp->b_validoff = bp->b_validend = 0;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							vm_pageout_deficit += (desiredpages - bp->b_npages);
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {
						s = splvm();
						if (m->flags & PG_BUSY) {
							m->flags |= PG_WANTED;
							tsleep(m, PVM, "pgtblk", 0);
						}
						splx(s);
						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
							(cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if (bp->b_flags & B_CACHE)
							vfs_buf_set_valid(bp, off, toff, bytesinpage, m);
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				if (vp->v_tag == VT_NFS &&
				    vp->v_type != VBLK) {
					if (bp->b_dirtyend > 0) {
						bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
						bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
					}
					if (bp->b_validend == 0)
						bp->b_flags &= ~B_CACHE;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
					bp->b_pages, bp->b_npages);
				bp->b_data = (caddr_t) ((vm_offset_t) bp->b_data |
					(off & PAGE_MASK));
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += (newbsize - bp->b_bufsize);
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
#if defined(NO_SCHEDULE_MODS)
		tsleep(bp, PRIBIO, "biowait", 0);
#else
		if (bp->b_flags & B_READ)
			tsleep(bp, PRIBIO, "biord", 0);
		else
			tsleep(bp, curproc->p_usrpri, "biowr", 0);
#endif
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}
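
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_raw_strategy_read" is hypothetical.  It shows the low-level pattern
 * that bread() wraps: mark the buffer for read, busy its pages, hand it to
 * the driver via VOP_STRATEGY(), and wait with biowait().
 */
#ifdef VFS_BIO_EXAMPLES
static int
my_raw_strategy_read(struct buf *bp)
{
	bp->b_flags |= B_READ;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
	vfs_busy_pages(bp, 0);
	VOP_STRATEGY(bp);
	return (biowait(bp));	/* 0 on success, or an errno */
}
#endif /* VFS_BIO_EXAMPLES */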

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();

#if !defined(MAX_PERF)
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");
#endif

	if (bp->b_flags & B_DONE) {
		splx(s);
#if !defined(MAX_PERF)
		printf("biodone: buffer already done\n");
#endif
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		obj = vp->v_object;

#if defined(VFS_BIO_DEBUG)
		if (vp->v_usecount == 0) {
			panic("biodone: zero vnode ref count");
		}

		if (vp->v_object == NULL) {
			panic("biodone: missing VM object");
		}

		if ((vp->v_flag & VOBJBUF) == 0) {
			panic("biodone: vnode is not setup for merged cache");
		}
#endif

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
#if !defined(MAX_PERF)
		if (!obj) {
			panic("biodone: no object");
		}
#endif
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vfs_page_set_valid(bp, foff, i, m);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
#if !defined(MAX_PERF)
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
#endif
				if (vp->v_type != VBLK)
#if !defined(MAX_PERF)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
#endif
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	splx(s);
}

static int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

static int vfs_update_interval = 30;

static void
vfs_update()
{
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
		oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
#if !defined(MAX_PERF)
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
#endif
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * Set NFS' b_validoff and b_validend fields from the valid bits
 * of a page.  If the consumer is not NFS, and the page is not
 * valid for the entire range, clear the B_CACHE flag to force
 * the consumer to re-read the page.
 */
static void
vfs_buf_set_valid(struct buf *bp,
    vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
    vm_page_t m)
{
	if (bp->b_vp->v_tag == VT_NFS && bp->b_vp->v_type != VBLK) {
		vm_offset_t svalid, evalid;
		int validbits = m->valid;

		/*
		 * This only bothers with the first valid range in the
		 * page.
		 */
		svalid = off;
		while (validbits && !(validbits & 1)) {
			svalid += DEV_BSIZE;
			validbits >>= 1;
		}
		evalid = svalid;
		while (validbits & 1) {
			evalid += DEV_BSIZE;
			validbits >>= 1;
		}
		/*
		 * Make sure this range is contiguous with the range
		 * built up from previous pages.  If not, then we will
		 * just use the range from the previous pages.
		 */
		if (svalid == bp->b_validend) {
			bp->b_validoff = min(bp->b_validoff, svalid);
			bp->b_validend = max(bp->b_validend, evalid);
		}
	} else if (!vm_page_is_valid(m,
		(vm_offset_t) ((foff + off) & PAGE_MASK),
		size)) {
		bp->b_flags &= ~B_CACHE;
	}
}

/*
 * Set the valid bits in a page, taking care of the b_validoff,
 * b_validend fields which NFS uses to optimise small reads.  Off is
 * the offset within the file and pageno is the page index within the buf.
 */
static void
vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
{
	struct vnode *vp = bp->b_vp;
	vm_ooffset_t soff, eoff;

	soff = off;
	eoff = off + min(PAGE_SIZE, bp->b_bufsize);
	vm_page_set_invalid(m,
	    (vm_offset_t) (soff & PAGE_MASK),
	    (vm_offset_t) (eoff - soff));
	if (vp->v_tag == VT_NFS && vp->v_type != VBLK) {
		vm_ooffset_t sv, ev;
		off = off - pageno * PAGE_SIZE;
		sv = off + ((bp->b_validoff + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1));
		ev = off + (bp->b_validend & ~(DEV_BSIZE - 1));
		soff = max(sv, soff);
		eoff = min(ev, eoff);
	}
	if (eoff > soff)
		vm_page_set_validclean(m,
		    (vm_offset_t) (soff & PAGE_MASK),
		    (vm_offset_t) (eoff - soff));
}
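
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_start_read" and its failure flag are hypothetical.  It shows the
 * pairing contract between vfs_busy_pages() (below) and vfs_unbusy_pages()
 * (above): pages busied before VOP_STRATEGY() are unbusied either by
 * biodone() on completion or by vfs_unbusy_pages() when the I/O is never
 * issued.
 */
#ifdef VFS_BIO_EXAMPLES
static int
my_start_read(struct buf *bp, int some_precondition_failed)
{
	bp->b_flags |= B_READ;
	vfs_busy_pages(bp, 0);
	if (some_precondition_failed) {
		vfs_unbusy_pages(bp);	/* undo, since no I/O will occur */
		return (EIO);
	}
	VOP_STRATEGY(bp);		/* biodone() will unbusy the pages */
	return (biowait(bp));
}
#endif /* VFS_BIO_EXAMPLES */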

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i, s;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		vfs_setdirty(bp);

retry:
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m && (m->flags & PG_BUSY)) {
				s = splvm();
				while (m->flags & PG_BUSY) {
					m->flags |= PG_WANTED;
					tsleep(m, PVM, "vbpage", 0);
				}
				splx(s);
				goto retry;
			}
		}

		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}

			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify)
				vfs_page_set_valid(bp, foff, i, m);
			else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_ooffset_t foff;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
			vm_page_t m = bp->b_pages[i];

			vfs_page_set_valid(bp, foff, i, m);
		}
	}
}

void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if( bp->b_flags & B_VMIO) {
		if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for(i=0;i<bp->b_bufsize;i+=DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if( bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for(i=0;i<bp->b_npages;i++) {
			if( bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if( bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for(j=0;j<PAGE_SIZE/DEV_BSIZE;j++) {
					if( (bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}
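
/*
 * Editor's note: an illustrative sketch, not part of the original file;
 * "my_zeroed_scratch_buffer" is hypothetical.  It shows how a caller can
 * combine geteblk() (above) with clrbuf() to obtain a zero-filled scratch
 * buffer that is not associated with any vnode.
 */
#ifdef VFS_BIO_EXAMPLES
static struct buf *
my_zeroed_scratch_buffer(int size)
{
	struct buf *bp;

	bp = geteblk(size);	/* empty, disassociated, busy buffer */
	clrbuf(bp);		/* non-VMIO, so zero via clrbuf() */
	return (bp);		/* caller eventually brelse()s it */
}
#endif /* VFS_BIO_EXAMPLES */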

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object,
			((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);
		if (!p) {
			vm_pageout_deficit += (to - from) >> PAGE_SHIFT;
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = index;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index, newnpages;

	from = round_page(from);
	to = round_page(to);
	newnpages = index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
#if !defined(MAX_PERF)
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
					bp->b_blkno, bp->b_lblkno);
			}
#endif
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			p->flags |= PG_BUSY;
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = newnpages;
}


#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(buffer, db_show_buffer)
{
	/* get args */
	struct buf *bp = (struct buf *)addr;

	if (!have_addr) {
		db_printf("usage: show buffer <addr>\n");
		return;
	}

	db_printf("b_proc = %p,\nb_flags = 0x%b\n", (void *)bp->b_proc,
		  bp->b_flags, "\20\40bounce\37cluster\36vmio\35ram\34ordered"
		  "\33paging\32xxx\31writeinprog\30wanted\27relbuf\26tape"
		  "\25read\24raw\23phys\22clusterok\21malloc\20nocache"
		  "\17locked\16inval\15gathered\14error\13eintr\12done\11dirty"
		  "\10delwri\7call\6cache\5busy\4bad\3async\2needcommit\1age");
	db_printf("b_error = %d, b_bufsize = %ld, b_bcount = %ld, "
		  "b_resid = %ld\nb_dev = 0x%x, b_data = %p, "
		  "b_blkno = %d, b_pblkno = %d\n",
		  bp->b_error, bp->b_bufsize, bp->b_bcount, bp->b_resid,
		  bp->b_dev, bp->b_data, bp->b_blkno, bp->b_pblkno);
	if (bp->b_npages) {
		int i;
		db_printf("b_npages = %d, pages(OBJ, IDX, PA): ", bp->b_npages);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m;
			m = bp->b_pages[i];
			db_printf("(0x%x, 0x%x, 0x%x)", m->object, m->pindex,
			    VM_PAGE_TO_PHYS(m));
			if ((i + 1) < bp->b_npages)
				db_printf(",");
		}
		db_printf("\n");
	}
}
#endif /* DDB */